Separate dhfs-fs/fuse/sync-base

2025-04-13 14:14:29 +02:00
parent 7c605135c5
commit 9178e7ee2d
220 changed files with 2081 additions and 1090 deletions


@@ -0,0 +1,5 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*

dhfs-parent/dhfs-fs/.gitignore

@@ -0,0 +1,43 @@
#Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml
# Eclipse
.project
.classpath
.settings/
bin/
# IntelliJ
.idea
*.ipr
*.iml
*.iws
# NetBeans
nb-configuration.xml
# Visual Studio Code
.vscode
.factorypath
# OSX
.DS_Store
# Vim
*.swp
*.swo
# patch
*.orig
*.rej
# Local environment
.env
# Plugin directory
/.quarkus/cli/plugins/


@@ -0,0 +1,2 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl


@@ -0,0 +1,43 @@
version: "3.2"
services:
dhfs1:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs1:/dhfs_root
- $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-jar /app/quarkus-run.jar"
ports:
- 8080:8080
- 8081:8443
- 5005:5005
dhfs2:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs2:/dhfs_root
- $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
--add-exports java.base/jdk.internal.access=ALL-UNNAMED
--add-opens=java.base/java.nio=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-jar /app/quarkus-run.jar"
ports:
- 8090:8080
- 8091:8443
- 5010:5010
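
A note on the compose file above: both nodes run privileged with /dev/fuse passed through, and each FUSE directory is bind-mounted with rshared propagation so the mount created inside the container propagates back to the host. Assuming target/quarkus-app has already been built (e.g. with ./mvnw package), the two-node cluster can be brought up with docker compose up --build.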

dhfs-parent/dhfs-fs/pom.xml

@@ -0,0 +1,204 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>net.openhft</groupId>
<artifactId>zero-allocation-hashing</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.SerCeMan</groupId>
<artifactId>jnr-fuse</artifactId>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>kleppmanntree</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>supportlib</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>objects</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
true
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>


@@ -0,0 +1,97 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# If you want to include the debug port in your docker image
# you will have to expose the debug port (5005 by default) like this : EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
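#
# For example, a hypothetical debug-enabled invocation combining the options above:
#
#   docker run -i --rm -p 8080:8080 -p 5005:5005 -e JAVA_DEBUG=true -e JAVA_DEBUG_PORT='*:5005' quarkus/server-jvm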
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` will never
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
#   "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]


@@ -0,0 +1,93 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# If you want to include the debug port in your docker image
# you will have to expose the debug port (5005 by default) like this : EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` will never
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
#   "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]


@@ -0,0 +1,27 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]


@@ -0,0 +1,30 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]


@@ -0,0 +1,13 @@
package com.usatiuk.dhfs.files.objects;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
@Override
public int estimateSize() {
return data.size();
}
}


@@ -0,0 +1,26 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.persistence.ChunkDataP;
import com.usatiuk.dhfs.persistence.JObjectKeyP;
import jakarta.inject.Singleton;
@Singleton
public class ChunkDataProtoSerializer implements ProtoSerializer<ChunkDataP, ChunkData> {
@Override
public ChunkData deserialize(ChunkDataP message) {
return new ChunkData(
JObjectKey.of(message.getKey().getName()),
message.getData()
);
}
@Override
public ChunkDataP serialize(ChunkData object) {
return ChunkDataP.newBuilder()
.setKey(JObjectKeyP.newBuilder().setName(object.key().name()).build())
.setData(object.data())
.build();
}
}


@@ -0,0 +1,47 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.jmap.JMapHolder;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;
import java.util.Collection;
import java.util.Set;
public record File(JObjectKey key, long mode, long cTime, long mTime,
boolean symlink
) implements JDataRemote, JMapHolder<JMapLongKey> {
public File withSymlink(boolean symlink) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withMode(long mode) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withCTime(long cTime) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withMTime(long mTime) {
return new File(key, mode, cTime, mTime, symlink);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return Set.of();
// return Set.copyOf(chunks().values());
}
@Override
public int estimateSize() {
return 64;
// return chunks.size() * 64;
}
@Override
public Class<? extends JDataRemoteDto> dtoClass() {
return FileDto.class;
}
}


@@ -0,0 +1,15 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
@Override
public Class<? extends JDataRemote> objClass() {
return File.class;
}
}


@@ -0,0 +1,24 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.repository.syncmap.DtoMapper;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
@ApplicationScoped
public class FileDtoMapper implements DtoMapper<File, FileDto> {
@Inject
JMapHelper jMapHelper;
@Inject
FileHelper fileHelper;
@Override
public FileDto toDto(File obj) {
return new FileDto(obj, fileHelper.getChunks(obj));
}
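// Note: the reverse mapping is left unsupported here; incoming FileDtos are
// applied by FileSyncHandler, which also rewrites the file's chunk map.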
@Override
public File fromDto(FileDto dto) {
throw new UnsupportedOperationException();
}
}


@@ -0,0 +1,36 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import java.util.ArrayList;
import java.util.List;
@ApplicationScoped
public class FileHelper {
@Inject
JMapHelper jMapHelper;
public List<Pair<Long, JObjectKey>> getChunks(File file) {
ArrayList<Pair<Long, JObjectKey>> chunks = new ArrayList<>();
try (var it = jMapHelper.getIterator(file)) {
while (it.hasNext()) {
var cur = it.next();
chunks.add(Pair.of(cur.getKey().key(), cur.getValue().ref()));
}
}
return List.copyOf(chunks);
}
public void replaceChunks(File file, List<Pair<Long, JObjectKey>> chunks) {
jMapHelper.deleteAll(file);
for (var f : chunks) {
jMapHelper.put(file, JMapLongKey.of(f.getLeft()), f.getRight());
}
}
}


@@ -0,0 +1,25 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.dhfs.persistence.FileDtoP;
import com.usatiuk.dhfs.utils.SerializationHelper;
import jakarta.inject.Singleton;
import java.io.IOException;
@Singleton
public class FileProtoSerializer implements ProtoSerializer<FileDtoP, FileDto> {
@Override
public FileDto deserialize(FileDtoP message) {
try (var is = message.getSerializedData().newInput()) {
return SerializationHelper.deserialize(is);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public FileDtoP serialize(FileDto object) {
return FileDtoP.newBuilder().setSerializedData(SerializationHelper.serialize(object)).build();
}
}


@@ -0,0 +1,241 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.RemoteObjectDataWrapper;
import com.usatiuk.dhfs.RemoteObjectMeta;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.repository.ObjSyncHandler;
import com.usatiuk.dhfs.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.repository.SyncHelper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.pcollections.HashPMap;
import org.pcollections.HashTreePMap;
import org.pcollections.PMap;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Objects;
@ApplicationScoped
public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
@Inject
Transaction curTx;
@Inject
PersistentPeerDataService persistentPeerDataService;
@Inject
JMapHelper jMapHelper;
@Inject
RemoteTransaction remoteTx;
@Inject
FileHelper fileHelper;
@Inject
JKleppmannTreeManager jKleppmannTreeManager;
@Inject
DhfsFileService fileService;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
}
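// Conflict resolution, in outline: merge the two changelogs entry-wise by max,
// keep the version with the newer mTime in place, and, if anything actually
// differed, materialize the losing version as a new file named
// "<name>.fconflict.<self>.<other>.<i>" next to the original in the tree.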
private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
@Nullable FileDto receivedData) {
var oursCurMeta = curTx.get(RemoteObjectMeta.class, key).orElse(null);
if (!oursCurMeta.knownType().isAssignableFrom(File.class))
throw new IllegalStateException("Object type mismatch: " + oursCurMeta.knownType() + " vs " + File.class);
if (!oursCurMeta.knownType().equals(File.class))
oursCurMeta = oursCurMeta.withKnownType(File.class);
curTx.put(oursCurMeta);
var oursCurFile = remoteTx.getDataLocal(File.class, key).orElse(null);
if (oursCurFile == null)
throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
var theirsFile = receivedData.file();
var oursChunks = fileHelper.getChunks(oursCurFile);
File first;
File second;
List<Pair<Long, JObjectKey>> firstChunks;
List<Pair<Long, JObjectKey>> secondChunks;
PeerId otherHostname;
if (oursCurFile.mTime() >= theirsFile.mTime()) {
first = oursCurFile;
firstChunks = oursChunks;
second = theirsFile;
secondChunks = receivedData.chunks();
otherHostname = from;
} else {
second = oursCurFile;
secondChunks = oursChunks;
first = theirsFile;
firstChunks = receivedData.chunks();
otherHostname = persistentPeerDataService.getSelfUuid();
}
Log.tracev("Conflict resolution: ours: {0}, theirs: {1}, chunks: {2}, {3}", oursCurFile, theirsFile, oursChunks, receivedData.chunks());
Log.tracev("Conflict resolution: first: {0}, second: {1}, chunks: {2}, {3}", first, second, firstChunks, secondChunks);
HashPMap<PeerId, Long> newChangelog = HashTreePMap.from(oursCurMeta.changelog());
for (var entry : receivedChangelog.entrySet()) {
newChangelog = newChangelog.plus(entry.getKey(),
Long.max(newChangelog.getOrDefault(entry.getKey(), 0L), entry.getValue())
);
}
oursCurMeta = oursCurMeta.withChangelog(newChangelog);
curTx.put(oursCurMeta);
boolean chunksDiff = !Objects.equals(firstChunks, secondChunks);
boolean wasChanged = first.mTime() != second.mTime()
|| first.cTime() != second.cTime()
|| first.mode() != second.mode()
|| first.symlink() != second.symlink()
|| chunksDiff;
if (wasChanged) {
oursCurMeta = oursCurMeta.withChangelog(
newChangelog.plus(persistentPeerDataService.getSelfUuid(), newChangelog.getOrDefault(persistentPeerDataService.getSelfUuid(), 0L) + 1)
);
curTx.put(oursCurMeta);
remoteTx.putDataRaw(oursCurFile.withCTime(first.cTime()).withMTime(first.mTime()).withMode(first.mode()).withSymlink(first.symlink()));
fileHelper.replaceChunks(oursCurFile, firstChunks);
var newFile = new File(JObjectKey.random(), second.mode(), second.cTime(), second.mTime(), second.symlink());
remoteTx.putData(newFile);
fileHelper.replaceChunks(newFile, secondChunks);
var parent = fileService.inoToParent(oursCurFile.key());
int i = 0;
do {
try {
getTreeW().move(parent.getRight(),
new JKleppmannTreeNodeMetaFile(
parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + otherHostname.toString() + "." + i,
newFile.key()
),
getTreeW().getNewNodeId()
);
} catch (AlreadyExistsException aex) {
i++;
continue;
}
break;
} while (true);
}
var curKnownRemoteVersion = oursCurMeta.knownRemoteVersions().get(from);
var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();
if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
oursCurMeta = oursCurMeta.withKnownRemoteVersions(oursCurMeta.knownRemoteVersions().plus(from, receivedTotalVer));
curTx.put(oursCurMeta);
}
}
@Override
public void handleRemoteUpdate(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
@Nullable FileDto receivedData) {
var current = curTx.get(RemoteObjectMeta.class, key).orElse(null);
if (current == null) {
current = new RemoteObjectMeta(key, HashTreePMap.empty());
curTx.put(current);
}
var changelogCompare = SyncHelper.compareChangelogs(current.changelog(), receivedChangelog);
switch (changelogCompare) {
case EQUAL -> {
Log.debug("No action on update: " + key + " from " + from);
if (!current.hasLocalData() && receivedData != null) {
current = current.withHaveLocal(true);
curTx.put(current);
curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
.map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));
if (!current.knownType().isAssignableFrom(File.class))
throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);
if (!current.knownType().equals(File.class))
current = current.withKnownType(File.class);
curTx.put(current);
fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
}
}
case NEWER -> {
Log.debug("Received newer index update than known: " + key + " from " + from);
var newChangelog = receivedChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ?
receivedChangelog : receivedChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L);
current = current.withChangelog(newChangelog);
if (receivedData != null) {
current = current.withHaveLocal(true);
curTx.put(current);
curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
.map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));
if (!current.knownType().isAssignableFrom(File.class))
throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);
if (!current.knownType().equals(File.class))
current = current.withKnownType(File.class);
curTx.put(current);
fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
} else {
current = current.withHaveLocal(false);
curTx.put(current);
}
}
case OLDER -> {
Log.debug("Received older index update than known: " + key + " from " + from);
return;
}
case CONFLICT -> {
Log.debug("Conflict on update (inconsistent version): " + key + " from " + from);
assert receivedData != null;
resolveConflict(from, key, receivedChangelog, receivedData);
// TODO:
return;
}
}
var curKnownRemoteVersion = current.knownRemoteVersions().get(from);
var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();
if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
current = current.withKnownRemoteVersions(current.knownRemoteVersions().plus(from, receivedTotalVer));
curTx.put(current);
}
}
}


@@ -0,0 +1,48 @@
package com.usatiuk.dhfs.files.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;
import java.util.Optional;
public interface DhfsFileService {
Optional<JObjectKey> open(String name);
Optional<JObjectKey> create(String name, long mode);
Pair<String, JObjectKey> inoToParent(JObjectKey ino);
void mkdir(String name, long mode);
Optional<GetattrRes> getattr(JObjectKey name);
Boolean chmod(JObjectKey name, long mode);
void unlink(String name);
Boolean rename(String from, String to);
Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs);
Iterable<String> readDir(String name);
long size(JObjectKey fileUuid);
Optional<ByteString> read(JObjectKey fileUuid, long offset, int length);
Long write(JObjectKey fileUuid, long offset, ByteString data);
default Long write(JObjectKey fileUuid, long offset, byte[] data) {
return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
}
Boolean truncate(JObjectKey fileUuid, long length);
String readlink(JObjectKey uuid);
ByteString readlinkBS(JObjectKey uuid);
JObjectKey symlink(String oldpath, String newpath);
}
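
A minimal usage sketch of the interface above, assuming an injected implementation; the caller class, path, and contents are illustrative, not part of this commit:

import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import jakarta.inject.Inject;
import java.nio.charset.StandardCharsets;

class FileServiceExample { // hypothetical caller
    @Inject
    DhfsFileService fileService;

    void example() {
        // Create /hello.txt, write into it, then read the bytes back.
        JObjectKey ino = fileService.create("/hello.txt", 0644).orElseThrow();
        fileService.write(ino, 0, "hello, dhfs".getBytes(StandardCharsets.UTF_8));
        ByteString contents = fileService.read(ino, 0, (int) fileService.size(ino)).orElseThrow();
    }
}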


@@ -0,0 +1,654 @@
package com.usatiuk.dhfs.files.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.dhfs.RemoteObjectMeta;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.files.objects.ChunkData;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.*;
import java.util.stream.StreamSupport;
@ApplicationScoped
public class DhfsFileServiceImpl implements DhfsFileService {
@Inject
Transaction curTx;
@Inject
RemoteTransaction remoteTx;
@Inject
TransactionManager jObjectTxManager;
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
int targetChunkAlignment;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
boolean useHashForChunks;
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
boolean allowRecursiveDelete;
@ConfigProperty(name = "dhfs.objects.ref_verification")
boolean refVerification;
@ConfigProperty(name = "dhfs.objects.write_log")
boolean writeLogging;
@Inject
JKleppmannTreeManager jKleppmannTreeManager;
@Inject
JMapHelper jMapHelper;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
}
private ChunkData createChunk(ByteString bytes) {
var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
remoteTx.putData(newChunk);
return newChunk;
}
void init(@Observes @Priority(500) StartupEvent event) {
Log.info("Initializing file service");
getTreeW();
}
private JKleppmannTreeNode getDirEntryW(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private JKleppmannTreeNode getDirEntryR(String name) {
var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) return Optional.empty();
var ret = curTx.get(JKleppmannTreeNode.class, res);
return ret;
}
@Override
public Optional<GetattrRes> getattr(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var ref = curTx.get(JData.class, uuid).orElse(null);
if (ref == null) return Optional.empty();
GetattrRes ret;
if (ref instanceof RemoteObjectMeta r) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? GetattrType.SYMLINK : GetattrType.FILE);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
}
} else if (ref instanceof JKleppmannTreeNode) {
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
}
return Optional.of(ret);
});
}
@Override
public Optional<JObjectKey> open(String name) {
return jObjectTxManager.executeTx(() -> {
try {
var ret = getDirEntryR(name);
return switch (ret.meta()) {
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno());
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
default -> Optional.empty();
};
} catch (StatusRuntimeException e) {
if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
return Optional.empty();
}
throw e;
}
});
}
private void ensureDir(JKleppmannTreeNode entry) {
if (!(entry.meta() instanceof JKleppmannTreeNodeMetaDirectory))
throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
}
@Override
public Optional<JObjectKey> create(String name, long mode) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String fname = path.getFileName().toString();
var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);
File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false);
remoteTx.putData(f);
try {
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
} catch (Exception e) {
// fobj.getMeta().removeRef(newNodeId);
throw e;
}
return Optional.of(f.key());
});
}
//FIXME: Slow..
@Override
public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
return jObjectTxManager.executeTx(() -> {
return getTreeW().findParent(w -> {
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
return f.getFileIno().equals(ino);
return false;
});
});
}
@Override
public void mkdir(String name, long mode) {
jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String dname = path.getFileName().toString();
Log.debug("Creating directory " + name);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
});
}
@Override
public void unlink(String name) {
jObjectTxManager.executeTx(() -> {
var node = getDirEntryOpt(name).orElse(null);
if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
if (!allowRecursiveDelete && !node.children().isEmpty())
throw new DirectoryNotEmptyException();
}
getTreeW().trash(node.meta(), node.key());
});
}
@Override
public Boolean rename(String from, String to) {
return jObjectTxManager.executeTx(() -> {
var node = getDirEntryW(from);
JKleppmannTreeNodeMeta meta = node.meta();
var toPath = Path.of(to);
var toDentry = getDirEntryW(toPath.getParent().toString());
ensureDir(toDentry);
getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
return true;
});
}
@Override
public Boolean chmod(JObjectKey uuid, long mode) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
if (dent instanceof JKleppmannTreeNode) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withMode(mode).withMTime(System.currentTimeMillis()));
return true;
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
});
}
@Override
public Iterable<String> readDir(String name) {
return jObjectTxManager.executeTx(() -> {
var found = getDirEntryW(name);
if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
return found.children().keySet();
});
}
@Override
public Optional<ByteString> read(JObjectKey fileUuid, long offset, int length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
return Optional.empty();
}
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
if (!it.hasNext())
return Optional.of(ByteString.empty());
// if (it.peekNextKey().key() != offset) {
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
// return Optional.of(ByteString.empty());
// }
long curPos = offset;
ByteString buf = ByteString.empty();
var chunk = it.next();
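// Walk chunks starting from the greatest key <= offset, concatenating the
// requested range; stop once `length` bytes are collected or the file ends.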
while (curPos < offset + length) {
var chunkPos = chunk.getKey().key();
long offInChunk = curPos - chunkPos;
long toReadInChunk = (offset + length) - curPos;
var chunkBytes = readChunk(chunk.getValue().ref());
long readableLen = chunkBytes.size() - offInChunk;
var toReadReally = Math.min(readableLen, toReadInChunk);
if (toReadReally < 0) break;
buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));
curPos += toReadReally;
if (readableLen > toReadInChunk)
break;
if (!it.hasNext()) break;
chunk = it.next();
}
return Optional.of(buf);
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
return Optional.empty();
}
});
}
private ByteString readChunk(JObjectKey uuid) {
var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);
if (chunkRead == null) {
Log.error("Chunk requested not found: " + uuid);
throw new StatusRuntimeException(Status.NOT_FOUND);
}
return chunkRead.data();
}
private int getChunkSize(JObjectKey uuid) {
return readChunk(uuid).size();
}
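// Clears the low n bits, aligning num down to a multiple of 2^n;
// e.g. alignDown(70000, 16) == 65536. Note n is a shift amount, not a byte size.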
private long alignDown(long num, long n) {
return num & -(1L << n);
}
@Override
public Long write(JObjectKey fileUuid, long offset, ByteString data) {
return jObjectTxManager.executeTx(() -> {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
if (file == null) {
Log.error("File not found when trying to write: " + fileUuid);
return -1L;
}
if (writeLogging) {
Log.info("Writing to file: " + file.key() + " size=" + size(fileUuid) + " "
+ offset + " " + data.size());
}
if (size(fileUuid) < offset) {
truncate(fileUuid, offset);
file = remoteTx.getData(File.class, fileUuid).orElse(null);
}
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
long writeEnd = offset + data.size();
long start = realOffset;
ByteString pendingPrefix = ByteString.empty();
ByteString pendingSuffix = ByteString.empty();
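// Collect every chunk overlapping the write range for removal, saving the
// bytes before `offset` (prefix) and past the write end (suffix) so they can
// be re-chunked together with the new data below.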
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
while (it.hasNext()) {
var curEntry = it.next();
long curChunkStart = curEntry.getKey().key();
var curChunkId = curEntry.getValue().ref();
long curChunkEnd = curChunkStart + getChunkSize(curChunkId);
if (curChunkEnd <= realOffset) break;
removedChunks.put(curEntry.getKey().key(), curChunkId);
if (curChunkStart < offset) {
if (curChunkStart < start)
start = curChunkStart;
var readChunk = readChunk(curChunkId);
pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
}
if (curChunkEnd > writeEnd) {
var readChunk = readChunk(curChunkId);
pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
}
if (curChunkEnd >= writeEnd) break;
}
}
ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);
int combinedSize = pendingWrites.size();
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
{
int targetChunkSize = 1 << targetChunkAlignment;
int cur = 0;
while (cur < combinedSize) {
int end;
if (targetChunkAlignment < 0)
end = combinedSize;
else
end = Math.min(cur + targetChunkSize, combinedSize);
var thisChunk = pendingWrites.substring(cur, end);
ChunkData newChunkData = createChunk(thisChunk);
newChunks.put(start, newChunkData.key());
start += thisChunk.size();
cur = end;
}
}
for (var e : removedChunks.entrySet()) {
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file);
return (long) data.size();
});
}
@Override
public Boolean truncate(JObjectKey fileUuid, long length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to write: " + fileUuid);
return false;
}
if (length == 0) {
jMapHelper.deleteAll(file);
remoteTx.putData(file);
return true;
}
var curSize = size(fileUuid);
if (curSize == length) return true;
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
if (curSize < length) {
long combinedSize = (length - curSize);
long start = curSize;
// Hack
HashMap<Long, ChunkData> zeroCache = new HashMap<>();
{
long cur = 0;
while (cur < combinedSize) {
long end;
if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
end = cur + targetChunkSize;
} else {
end = combinedSize;
}
}
if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])));
ChunkData newChunkData = zeroCache.get(end - cur);
newChunks.put(start, newChunkData.key());
start += newChunkData.data().size();
cur = end;
}
}
} else {
// Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(length))) {
last = it.hasNext() ? it.next() : null;
while (it.hasNext()) {
var next = it.next();
removedChunks.put(next.getKey().key(), next.getValue().ref());
}
}
removedChunks.put(last.getKey().key(), last.getValue().ref());
//
// NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
//
// long start = 0;
//
// try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
// first = it.hasNext() ? it.next() : null;
// boolean empty = last == null;
// if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
// first = null;
// last = null;
// start = offset;
// } else if (!empty) {
// assert first != null;
// removedChunks.put(first.getKey().key(), first.getValue().ref());
// while (it.hasNext() && it.peekNextKey() != last.getKey()) {
// var next = it.next();
// removedChunks.put(next.getKey().key(), next.getValue().ref());
// }
// removedChunks.put(last.getKey().key(), last.getValue().ref());
// }
// }
//
// var tail = chunksAll.lowerEntry(length);
// var afterTail = chunksAll.tailMap(tail.getKey(), false);
//
// removedChunks.put(tail.getKey(), tail.getValue());
// removedChunks.putAll(afterTail);
var tailBytes = readChunk(last.getValue().ref());
var newChunk = tailBytes.substring(0, (int) (length - last.getKey().key()));
ChunkData newChunkData = createChunk(newChunk);
newChunks.put(last.getKey().key(), newChunkData.key());
}
// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());
for (var e : removedChunks.entrySet()) {
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file);
return true;
});
}
@Override
public String readlink(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
return readlinkBS(uuid).toStringUtf8();
});
}
@Override
public ByteString readlinkBS(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid))).get();
});
}
@Override
public JObjectKey symlink(String oldpath, String newpath) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(newpath);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String fname = path.getFileName().toString();
var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);
ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true);
jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());
remoteTx.putData(f);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
return f.key();
});
}
@Override
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
// FIXME:
if (dent instanceof JKleppmannTreeNode) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
return true;
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
});
}
@Override
public long size(JObjectKey fileUuid) {
return jObjectTxManager.executeTx(() -> {
long realSize = 0;
var file = remoteTx.getData(File.class, fileUuid)
.orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) {
last = it.hasNext() ? it.next() : null;
}
if (last != null) {
realSize = last.getKey().key() + getChunkSize(last.getValue().ref());
}
return realSize;
});
}
}


@@ -0,0 +1,8 @@
package com.usatiuk.dhfs.files.service;
public class DirectoryNotEmptyException extends RuntimeException {
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
}


@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.files.service;
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}


@@ -0,0 +1,7 @@
package com.usatiuk.dhfs.files.service;
public enum GetattrType {
FILE,
DIRECTORY,
SYMLINK
}


@@ -0,0 +1,34 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.objects.periodic-push-op-interval=5m
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
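
A note on the chunking settings above: in DhfsFileServiceImpl's write path the chunk size is derived from dhfs.files.target_chunk_alignment (2^19 = 524288 bytes, i.e. 512 KiB chunks), while dhfs.files.target_chunk_size (2097152 bytes = 2 MiB) is used when growing a file in truncate.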


@@ -0,0 +1,29 @@
package com.usatiuk.dhfs;
import io.quarkus.test.junit.QuarkusTestProfile;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
Path tempDirWithPrefix;
try {
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
} catch (IOException e) {
throw new RuntimeException(e);
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;
}
}
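
A sketch of how a concrete profile can hook into getConfigOverrides; the subclass name and property value are hypothetical:

import java.util.Map;

// Hypothetical concrete profile pinning one extra property for a test class.
public class SmallChunkTestProfile extends TempDataProfile {
    @Override
    protected void getConfigOverrides(Map<String, String> toPut) {
        toPut.put("dhfs.files.target_chunk_alignment", "12"); // 2^12 = 4 KiB chunks
    }
}
// Applied to a test class with: @TestProfile(SmallChunkTestProfile.class)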


@@ -0,0 +1,40 @@
package com.usatiuk.dhfs;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
@ApplicationScoped
public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
} catch (Exception ignored) {
Log.warn("Couldn't cleanup test data on init");
}
}
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}
void purgeDirectory(File dir) {
// listFiles() returns null for a missing or unreadable directory;
// guard so the shutdown-time cleanup doesn't throw on a fresh run
File[] files = dir.listFiles();
if (files == null)
return;
for (File file : files) {
if (file.isDirectory())
purgeDirectory(file); // empty subdirectories bottom-up so delete() succeeds
file.delete();
}
}
}

View File

@@ -0,0 +1,83 @@
package com.usatiuk.dhfs.benchmarks;
import io.quarkus.logging.Log;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import java.util.Arrays;
import java.util.function.Supplier;
public class Benchmarker {
static <T> long[] runLatency(Supplier<T> fn, int iterations) {
var out = new long[iterations];
int hash = 1;
for (int i = 0; i < iterations; i++) {
long startNanos = System.nanoTime();
var cur = fn.get();
long stopNanos = System.nanoTime();
out[i] = stopNanos - startNanos;
hash = hash * 31 + cur.hashCode();
}
System.out.println("\nHash: " + hash);
return out;
}
static <T> long[] runThroughput(Supplier<T> fn, int iterations, long iterationTime) {
var out = new long[iterations];
int hash = 1;
for (int i = 0; i < iterations; i++) {
long startMillis = System.currentTimeMillis();
long count = 0;
// FIXME: That's probably janky
while (System.currentTimeMillis() - startMillis < iterationTime) {
var res = fn.get();
count++;
hash = hash * 31 + res.hashCode();
}
System.out.println("Ran iteration " + i + "/" + iterations + " count=" + count);
out[i] = count;
}
System.out.println("\nHash: " + hash);
return out;
}
static void printStats(double[] data, String unit) {
DescriptiveStatistics stats = new DescriptiveStatistics();
for (var r : data) {
stats.addValue(r);
}
Log.info("\n" + stats +
"\n 50%: " + stats.getPercentile(50) + " " + unit +
"\n 90%: " + stats.getPercentile(90) + " " + unit +
"\n 95%: " + stats.getPercentile(95) + " " + unit +
"\n 99%: " + stats.getPercentile(99) + " " + unit +
"\n 99.9%: " + stats.getPercentile(99.9) + " " + unit +
"\n 99.99%: " + stats.getPercentile(99.99) + " " + unit
);
}
static <T> void runAndPrintMixSimple(String name, Supplier<T> fn, int latencyIterations, int thrptIterations, int thrptIterationTime, int warmupIterations, int warmupIterationTime) {
System.out.println("\n=========\n" + "Running " + name + "\n=========\n");
System.out.println("==Warmup==");
runThroughput(fn, warmupIterations, warmupIterationTime);
System.out.println("==Warmup done==");
System.out.println("==Throughput==");
var thrpt = runThroughput(fn, thrptIterations, thrptIterationTime);
// convert per-window counts to a rate: ops/s = count * 1000 / window length in ms
printStats(Arrays.stream(thrpt).mapToDouble(o -> (double) o * 1000 / thrptIterationTime).toArray(), "ops/s");
System.out.println("==Throughput done==");
System.out.println("==Latency==");
var lat = runLatency(fn, latencyIterations);
printStats(Arrays.stream(lat).mapToDouble(o -> (double) o).toArray(), "ns/op");
System.out.println("==Latency done==");
System.out.println("\n=========\n" + name + " done" + "\n=========\n");
}
}

View File

@@ -0,0 +1,53 @@
package com.usatiuk.dhfs.benchmarks;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.objects.JObjectKey;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import java.util.Map;
class Profiles {
public static class DhfsFuseTestProfile extends TempDataProfile {
@Override
protected void getConfigOverrides(Map<String, String> ret) {
ret.put("quarkus.log.category.\"com.usatiuk.dhfs\".level", "INFO");
ret.put("dhfs.fuse.enabled", "false");
ret.put("dhfs.objects.ref_verification", "false");
}
}
}
@QuarkusTest
@TestProfile(Profiles.DhfsFuseTestProfile.class)
public class DhfsFileBenchmarkTest {
@Inject
DhfsFileService dhfsFileService;
@Test
@Disabled
void openRootTest() {
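// trailing args map to runAndPrintMixSimple: latencyIterations, thrptIterations,
// thrptIterationTime (ms), warmupIterations, warmupIterationTime (ms)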
Benchmarker.runAndPrintMixSimple("dhfsFileService.open(\"\")",
() -> {
return dhfsFileService.open("");
}, 1_000_000, 5, 1000, 5, 1000);
}
@Test
@Disabled
void writeMbTest() {
JObjectKey file = dhfsFileService.create("/writeMbTest", 0777).get();
var bb = ByteBuffer.allocateDirect(1024 * 1024);
Benchmarker.runAndPrintMixSimple("dhfsFileService.write(\"\")",
() -> {
var thing = UnsafeByteOperations.unsafeWrap(bb);
return dhfsFileService.write(file, dhfsFileService.size(file), thing);
}, 1_000, 10, 100, 1, 100);
}
}

View File

@@ -0,0 +1,9 @@
package com.usatiuk.dhfs.files;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
@QuarkusTest
@TestProfile(Profiles.DhfsFileServiceSimpleTestProfile.class)
public class DhfsFileServiceSimpleTest extends DhfsFileServiceSimpleTestImpl {
}

View File

@@ -0,0 +1,298 @@
package com.usatiuk.dhfs.files;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.RepeatedTest;
import org.junit.jupiter.api.Test;
import java.util.Map;
class Profiles {
public static class DhfsFileServiceSimpleTestProfile extends TempDataProfile {
@Override
protected void getConfigOverrides(Map<String, String> ret) {
ret.put("dhfs.fuse.enabled", "false");
}
}
public static class DhfsFileServiceSimpleTestProfileNoChunking extends TempDataProfile {
@Override
protected void getConfigOverrides(Map<String, String> ret) {
ret.put("dhfs.fuse.enabled", "false");
ret.put("dhfs.files.target_chunk_size", "-1");
ret.put("dhfs.files.target_chunk_alignment", "-1");
}
}
public static class DhfsFileServiceSimpleTestProfileSmallChunking extends TempDataProfile {
@Override
protected void getConfigOverrides(Map<String, String> ret) {
ret.put("dhfs.fuse.enabled", "false");
ret.put("dhfs.files.target_chunk_size", "3");
ret.put("dhfs.files.target_chunk_alignment", "2");
}
}
}
public abstract class DhfsFileServiceSimpleTestImpl {
@Inject
DhfsFileService fileService;
@Inject
Transaction curTx;
@Inject
TransactionManager jObjectTxManager;
@Inject
RemoteTransaction remoteTx;
// @Test
// void readTest() {
// var fuuid = UUID.randomUUID();
// {
// ChunkData c1 = new ChunkData(ByteString.copyFrom("12345".getBytes()));
// ChunkData c2 = new ChunkData(ByteString.copyFrom("678".getBytes()));
// ChunkData c3 = new ChunkData(ByteString.copyFrom("91011".getBytes()));
// File f = new File(fuuid, 777, false);
// f.chunks().put(0L, c1.getName());
// f.chunks().put((long) c1.getBytes().size(), c2.getName());
// f.chunks().put((long) c1.getBytes().size() + c2.getBytes().size(), c3.getName());
//
// // FIXME: dhfs_files
//
// var c1o = new AtomicReference<String>();
// var c2o = new AtomicReference<String>();
// var c3o = new AtomicReference<String>();
// var fo = new AtomicReference<String>();
//
// jObjectTxManager.executeTx(() -> {
// c1o.set(curTx.put(c1, Optional.of(f.getName())).getMeta().getName());
// c2o.set(curTx.put(c2, Optional.of(f.getName())).getMeta().getName());
// c3o.set(curTx.put(c3, Optional.of(f.getName())).getMeta().getName());
// fo.set(curTx.put(f, Optional.empty()).getMeta().getName());
// });
//
// var all = jObjectManager.findAll();
// Assertions.assertTrue(all.contains(c1o.get()));
// Assertions.assertTrue(all.contains(c2o.get()));
// Assertions.assertTrue(all.contains(c3o.get()));
// Assertions.assertTrue(all.contains(fo.get()));
// }
//
// String all = "1234567891011";
//
// {
// for (int start = 0; start < all.length(); start++) {
// for (int end = start; end <= all.length(); end++) {
// var read = fileService.read(fuuid.toString(), start, end - start);
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray());
// }
// }
// }
// }
@RepeatedTest(100)
void dontMkdirTwiceTest() {
Assertions.assertDoesNotThrow(() -> fileService.mkdir("/dontMkdirTwiceTest", 777));
Assertions.assertThrows(AlreadyExistsException.class, () -> fileService.mkdir("/dontMkdirTwiceTest", 777));
fileService.unlink("/dontMkdirTwiceTest");
Assertions.assertFalse(fileService.open("/dontMkdirTwiceTest").isPresent());
}
@RepeatedTest(100)
void writeTest() {
var ret = fileService.create("/writeTest", 777);
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).get().toByteArray());
fileService.write(uuid, 4, new byte[]{10, 11, 12});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.write(uuid, 10, new byte[]{13, 14});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
fileService.write(uuid, 6, new byte[]{15, 16});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
fileService.write(uuid, 3, new byte[]{17, 18});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
fileService.unlink("/writeTest");
Assertions.assertFalse(fileService.open("/writeTest").isPresent());
}
@Test
void removeTest() {
var ret = fileService.create("/removeTest", 777);
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.unlink("/removeTest");
Assertions.assertFalse(fileService.open("/removeTest").isPresent());
}
@Test
void truncateTest1() {
var ret = fileService.create("/truncateTest1", 777);
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
}
@RepeatedTest(100)
void truncateTest2() {
var ret = fileService.create("/truncateTest2", 777);
try {
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.truncate(uuid, 20);
fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray());
} finally {
fileService.unlink("/truncateTest2");
}
}
@Test
void truncateTest3() {
var ret = fileService.create("/truncateTest3", 777);
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.truncate(uuid, 7);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6}, fileService.read(uuid, 0, 20).get().toByteArray());
}
@Test
void moveTest() {
var ret = fileService.create("/moveTest", 777);
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertTrue(fileService.rename("/moveTest", "/movedTest"));
Assertions.assertFalse(fileService.open("/moveTest").isPresent());
Assertions.assertTrue(fileService.open("/movedTest").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest").get(), 0, 10).get().toByteArray());
}
@Test
void moveOverTest() throws InterruptedException {
var ret = fileService.create("/moveOverTest1", 777);
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
var ret2 = fileService.create("/moveOverTest2", 777);
Assertions.assertTrue(ret2.isPresent());
var uuid2 = ret2.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29});
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray());
jObjectTxManager.run(() -> {
// read the move target inside a transaction; orElseThrow asserts it still exists
var oldfile = remoteTx.getData(File.class, ret2.get()).orElseThrow(IllegalStateException::new);
// var chunk = oldfile.chunks().get(0L);
// var chunkObj = remoteTx.getData(ChunkData.class, chunk).orElseThrow(IllegalStateException::new);
});
Assertions.assertTrue(fileService.rename("/moveOverTest1", "/moveOverTest2"));
Assertions.assertFalse(fileService.open("/moveOverTest1").isPresent());
Assertions.assertTrue(fileService.open("/moveOverTest2").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray());
// await().atMost(5, TimeUnit.SECONDS).until(() -> {
// jObjectTxManager.run(() -> {
//
// return chunkObj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY,
// (m, d) -> !m.getReferrers().contains(uuid));
// });
// });
}
@Test
void readOverSizeTest() {
var ret = fileService.create("/readOverSizeTest", 777);
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).get().toByteArray());
}
@Test
void writeOverSizeTest() {
var ret = fileService.create("/writeOverSizeTest", 777);
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.write(uuid, 20, new byte[]{10, 11, 12, 13, 14, 15, 16, 17, 18, 19});
Assertions.assertArrayEquals(new byte[]{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19
}, fileService.read(uuid, 0, 30).get().toByteArray());
}
@Test
void moveTest2() throws InterruptedException {
var ret = fileService.create("/moveTest2", 777);
Assertions.assertTrue(ret.isPresent());
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
// var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new);
// var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0);
// var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new);
//
// chunkObj.runReadLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
// Assertions.assertTrue(m.getReferrers().contains(uuid));
// });
Assertions.assertTrue(fileService.rename("/moveTest2", "/movedTest2"));
Assertions.assertFalse(fileService.open("/moveTest2").isPresent());
Assertions.assertTrue(fileService.open("/movedTest2").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest2").get(), 0, 10).get().toByteArray());
}
}
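The three concrete suites that extend this class (DhfsFileServiceSimpleTest above, plus the no-chunking and small-chunking variants below) run the same assertions under the three chunking profiles defined at the top of this file.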

View File

@@ -0,0 +1,9 @@
package com.usatiuk.dhfs.files;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
@QuarkusTest
@TestProfile(Profiles.DhfsFileServiceSimpleTestProfileNoChunking.class)
public class DhfsFileServiceSimpleTestNoChunkingTest extends DhfsFileServiceSimpleTestImpl {
}

View File

@@ -0,0 +1,9 @@
package com.usatiuk.dhfs.files;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
@QuarkusTest
@TestProfile(Profiles.DhfsFileServiceSimpleTestProfileSmallChunking.class)
public class DhfsFileServiceSimpleTestSmallChunkingTest extends DhfsFileServiceSimpleTestImpl {
}

View File

@@ -0,0 +1,12 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true