Mirror of https://github.com/usatiuk/dhfs.git (synced 2025-10-29 04:57:48 +01:00)

Compare commits: 87c404828c ... 0849df60ae

2 commits (author and date columns were empty in the mirror):
- 0849df60ae
- 9cb5c226f9
.github/workflows/server.yml (vendored, 2 changes)
@@ -55,7 +55,7 @@ jobs:
       - uses: actions/upload-artifact@v4
         with:
           name: DHFS Server Package
-          path: dhfs-parent/dhfs-app/target/quarkus-app
+          path: dhfs-parent/dhfs-fuse/target/quarkus-app

       - uses: actions/upload-artifact@v4
         if: ${{ always() }}
Modified file (name lost in the mirror; an IntelliJ IDEA Quarkus run configuration named "Main 2"):

@@ -1,8 +1,8 @@
 <component name="ProjectRunConfigurationManager">
   <configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
-    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main" />
-    <module name="dhfs-app" />
-    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
+    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
+    <module name="dhfs-fuse" />
+    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
     <extension name="coverage">
       <pattern>
         <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
Modified file (name lost in the mirror; the IntelliJ IDEA Quarkus run configuration named "Main"):

@@ -1,8 +1,8 @@
 <component name="ProjectRunConfigurationManager">
   <configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
-    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main" />
-    <module name="dhfs-app" />
-    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
+    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
+    <module name="dhfs-fuse" />
+    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
     <extension name="coverage">
       <pattern>
         <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
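The two run configurations above now deliberately diverge: node 1 runs generational ZGC with a 1 GB heap, node 2 the parallel collector with 512 MB. A minimal sketch (hypothetical GcCheck class; the java.lang.management API itself is standard) to confirm at runtime which collector a node actually picked up:

    import java.lang.management.ManagementFactory;

    public class GcCheck {
        public static void main(String[] args) {
            // Under -XX:+UseParallelGC this prints beans such as "PS Scavenge" / "PS MarkSweep";
            // under generational ZGC, names like "ZGC Minor Cycles" / "ZGC Major Cycles".
            ManagementFactory.getGarbageCollectorMXBeans()
                    .forEach(bean -> System.out.println(bean.getName()));
        }
    }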
Deleted file (name lost in the mirror; evidently a .dockerignore for the Quarkus build output):

@@ -1,5 +0,0 @@
-*
-!target/*-runner
-!target/*-runner.jar
-!target/lib/*
-!target/quarkus-app/*
dhfs-parent/dhfs-app/.gitignore (vendored, 43 deletions)

@@ -1,43 +0,0 @@
-#Maven
-target/
-pom.xml.tag
-pom.xml.releaseBackup
-pom.xml.versionsBackup
-release.properties
-.flattened-pom.xml
-
-# Eclipse
-.project
-.classpath
-.settings/
-bin/
-
-# IntelliJ
-.idea
-*.ipr
-*.iml
-*.iws
-
-# NetBeans
-nb-configuration.xml
-
-# Visual Studio Code
-.vscode
-.factorypath
-
-# OSX
-.DS_Store
-
-# Vim
-*.swp
-*.swo
-
-# patch
-*.orig
-*.rej
-
-# Local environment
-.env
-
-# Plugin directory
-/.quarkus/cli/plugins/
Deleted file (name lost in the mirror; a two-line Dockerfile):

@@ -1,2 +0,0 @@
-FROM azul/zulu-openjdk-debian:21-jre-latest
-RUN apt update && apt install -y libfuse2 curl
Deleted file (name lost in the mirror; a Docker Compose file defining a two-node test cluster):

@@ -1,43 +0,0 @@
-version: "3.2"
-
-services:
-  dhfs1:
-    build: .
-    privileged: true
-    devices:
-      - /dev/fuse
-    volumes:
-      - $HOME/dhfs/dhfs1:/dhfs_root
-      - $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
-      - ./target/quarkus-app:/app
-    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
-      -Ddhfs.objects.root=/dhfs_root/d
-      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-      -jar /app/quarkus-run.jar"
-    ports:
-      - 8080:8080
-      - 8081:8443
-      - 5005:5005
-  dhfs2:
-    build: .
-    privileged: true
-    devices:
-      - /dev/fuse
-    volumes:
-      - $HOME/dhfs/dhfs2:/dhfs_root
-      - $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
-      - ./target/quarkus-app:/app
-    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-      --add-exports java.base/jdk.internal.access=ALL-UNNAMED
-      --add-opens=java.base/java.nio=ALL-UNNAMED
-      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
-      -Ddhfs.objects.root=/dhfs_root/d
-      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-      -jar /app/quarkus-run.jar"
-    ports:
-      - 8090:8080
-      - 8091:8443
-      - 5010:5010
Deleted file (name lost in the mirror; the dhfs-app Maven POM, per its artifactId):

@@ -1,172 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>com.usatiuk.dhfs</groupId>
-    <artifactId>dhfs-app</artifactId>
-    <version>1.0-SNAPSHOT</version>
-
-    <parent>
-        <groupId>com.usatiuk.dhfs</groupId>
-        <artifactId>parent</artifactId>
-        <version>1.0-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.junit.jupiter</groupId>
-            <artifactId>junit-jupiter-params</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.testcontainers</groupId>
-            <artifactId>testcontainers</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.awaitility</groupId>
-            <artifactId>awaitility</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.bouncycastle</groupId>
-            <artifactId>bcprov-jdk18on</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.bouncycastle</groupId>
-            <artifactId>bcpkix-jdk18on</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-security</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>net.openhft</groupId>
-            <artifactId>zero-allocation-hashing</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-grpc</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-arc</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-client</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-client-jsonb</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-jsonb</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-scheduler</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-junit5</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.jboss.slf4j</groupId>
-            <artifactId>slf4j-jboss-logmanager</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-collections4</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.pcollections</groupId>
-            <artifactId>pcollections</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-math3</artifactId>
-            <version>3.6.1</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk.dhfs</groupId>
-            <artifactId>dhfs-fuse</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk.dhfs</groupId>
-            <artifactId>utils</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-surefire-plugin</artifactId>
-                <configuration>
-                    <forkCount>1C</forkCount>
-                    <reuseForks>false</reuseForks>
-                    <parallel>classes</parallel>
-                    <systemPropertyVariables>
-                        <junit.jupiter.execution.parallel.enabled>
-                            false
-                        </junit.jupiter.execution.parallel.enabled>
-                    </systemPropertyVariables>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-failsafe-plugin</artifactId>
-                <configuration>
-                    <forkCount>1C</forkCount>
-                    <reuseForks>false</reuseForks>
-                    <parallel>classes</parallel>
-                    <systemPropertyVariables>
-                        <junit.jupiter.execution.parallel.enabled>
-                            false
-                        </junit.jupiter.execution.parallel.enabled>
-                        <junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
-                        <junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
-                    </systemPropertyVariables>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>${quarkus.platform.group-id}</groupId>
-                <artifactId>quarkus-maven-plugin</artifactId>
-                <version>${quarkus.platform.version}</version>
-                <extensions>true</extensions>
-                <executions>
-                    <execution>
-                        <id>quarkus-plugin</id>
-                        <goals>
-                            <goal>build</goal>
-                            <goal>generate-code</goal>
-                            <goal>generate-code-tests</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>
Deleted file (name lost in the mirror; per its own build instructions, src/main/docker/Dockerfile.jvm):

@@ -1,97 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
-#
-# Before building the container image run:
-#
-# ./mvnw package
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-jvm
-#
-# If you want to include the debug port into your docker image
-# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
-# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
-# when running the container
-#
-# Then run the container using :
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-jvm
-#
-# This image uses the `run-java.sh` script to run the application.
-# This scripts computes the command line to execute your Java application, and
-# includes memory/GC tuning.
-# You can configure the behavior using the following environment properties:
-# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
-# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
-#   in JAVA_OPTS (example: "-Dsome.property=foo")
-# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
-#   used to calculate a default maximal heap memory based on a containers restriction.
-#   If used in a container without any memory constraints for the container then this
-#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
-#   of the container available memory as set here. The default is `50` which means 50%
-#   of the available memory is used as an upper boundary. You can skip this mechanism by
-#   setting this value to `0` in which case no `-Xmx` option is added.
-# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
-#   is used to calculate a default initial heap memory based on the maximum heap memory.
-#   If used in a container without any memory constraints for the container then this
-#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
-#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
-#   is used as the initial heap size. You can skip this mechanism by setting this value
-#   to `0` in which case no `-Xms` option is added (example: "25")
-# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
-#   This is used to calculate the maximum value of the initial heap memory. If used in
-#   a container without any memory constraints for the container then this option has
-#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
-#   here. The default is 4096MB which means the calculated value of `-Xms` never will
-#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
-# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
-#   when things are happening. This option, if set to true, will set
-#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
-# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
-#   true").
-# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
-# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
-#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
-# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
-# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
-#   (example: "20")
-# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
-#   (example: "40")
-# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
-#   (example: "4")
-# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
-#   previous GC times. (example: "90")
-# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
-# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
-# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
-#   contain the necessary JRE command-line options to specify the required GC, which
-#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
-# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
-# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
-# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
-#   accessed directly. (example: "foo.example.com,bar.example.com")
-#
-###
-FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
-
-ENV LANGUAGE='en_US:en'
-
-
-# We make four distinct layers so if there are application changes the library layers can be re-used
-COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
-COPY --chown=185 target/quarkus-app/*.jar /deployments/
-COPY --chown=185 target/quarkus-app/app/ /deployments/app/
-COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
-
-EXPOSE 8080
-USER 185
-ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
-ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
-
-ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
Deleted file (name lost in the mirror; per its own build instructions, src/main/docker/Dockerfile.legacy-jar):

@@ -1,93 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
-#
-# Before building the container image run:
-#
-# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
-#
-# If you want to include the debug port into your docker image
-# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
-# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
-# when running the container
-#
-# Then run the container using :
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
-#
-# This image uses the `run-java.sh` script to run the application.
-# This scripts computes the command line to execute your Java application, and
-# includes memory/GC tuning.
-# You can configure the behavior using the following environment properties:
-# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
-# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
-#   in JAVA_OPTS (example: "-Dsome.property=foo")
-# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
-#   used to calculate a default maximal heap memory based on a containers restriction.
-#   If used in a container without any memory constraints for the container then this
-#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
-#   of the container available memory as set here. The default is `50` which means 50%
-#   of the available memory is used as an upper boundary. You can skip this mechanism by
-#   setting this value to `0` in which case no `-Xmx` option is added.
-# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
-#   is used to calculate a default initial heap memory based on the maximum heap memory.
-#   If used in a container without any memory constraints for the container then this
-#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
-#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
-#   is used as the initial heap size. You can skip this mechanism by setting this value
-#   to `0` in which case no `-Xms` option is added (example: "25")
-# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
-#   This is used to calculate the maximum value of the initial heap memory. If used in
-#   a container without any memory constraints for the container then this option has
-#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
-#   here. The default is 4096MB which means the calculated value of `-Xms` never will
-#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
-# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
-#   when things are happening. This option, if set to true, will set
-#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
-# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
-#   true").
-# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
-# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
-#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
-# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
-# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
-#   (example: "20")
-# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
-#   (example: "40")
-# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
-#   (example: "4")
-# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
-#   previous GC times. (example: "90")
-# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
-# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
-# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
-#   contain the necessary JRE command-line options to specify the required GC, which
-#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
-# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
-# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
-# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
-#   accessed directly. (example: "foo.example.com,bar.example.com")
-#
-###
-FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
-
-ENV LANGUAGE='en_US:en'
-
-
-COPY target/lib/* /deployments/lib/
-COPY target/*-runner.jar /deployments/quarkus-run.jar
-
-EXPOSE 8080
-USER 185
-ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
-ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
-
-ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
Deleted file (name lost in the mirror; per its own build instructions, src/main/docker/Dockerfile.native):

@@ -1,27 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
-#
-# Before building the container image run:
-#
-# ./mvnw package -Dnative
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server
-#
-###
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
-WORKDIR /work/
-RUN chown 1001 /work \
-    && chmod "g+rwX" /work \
-    && chown 1001:root /work
-COPY --chown=1001:root target/*-runner /work/application
-
-EXPOSE 8080
-USER 1001
-
-ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
Deleted file (name lost in the mirror; per its own build instructions, src/main/docker/Dockerfile.native-micro):

@@ -1,30 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
-# It uses a micro base image, tuned for Quarkus native executables.
-# It reduces the size of the resulting container image.
-# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
-#
-# Before building the container image run:
-#
-# ./mvnw package -Dnative
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server
-#
-###
-FROM quay.io/quarkus/quarkus-micro-image:2.0
-WORKDIR /work/
-RUN chown 1001 /work \
-    && chmod "g+rwX" /work \
-    && chown 1001:root /work
-COPY --chown=1001:root target/*-runner /work/application
-
-EXPOSE 8080
-USER 1001
-
-ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
Deleted file (name lost in the mirror; a Quarkus application.properties with the DHFS defaults):

@@ -1,35 +0,0 @@
-quarkus.grpc.server.use-separate-server=false
-dhfs.objects.peerdiscovery.port=42069
-dhfs.objects.peerdiscovery.interval=4s
-dhfs.objects.peerdiscovery.broadcast=true
-dhfs.objects.sync.timeout=30
-dhfs.objects.sync.ping.timeout=5
-dhfs.objects.invalidation.threads=16
-dhfs.objects.invalidation.delay=1000
-dhfs.objects.reconnect_interval=5s
-dhfs.objects.write_log=false
-dhfs.objects.periodic-push-op-interval=5m
-dhfs.fuse.root=${HOME}/dhfs_default/fuse
-dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
-dhfs.fuse.debug=false
-dhfs.fuse.enabled=true
-dhfs.files.allow_recursive_delete=false
-dhfs.files.target_chunk_size=524288
-dhfs.files.max_chunk_size=524288
-dhfs.files.target_chunk_alignment=17
-dhfs.objects.deletion.delay=1000
-dhfs.objects.deletion.can-delete-retry-delay=10000
-dhfs.objects.ref_verification=true
-dhfs.files.use_hash_for_chunks=false
-dhfs.objects.autosync.threads=16
-dhfs.objects.autosync.download-all=false
-dhfs.objects.move-processor.threads=16
-dhfs.objects.ref-processor.threads=16
-dhfs.objects.opsender.batch-size=100
-dhfs.objects.lock_timeout_secs=2
-dhfs.local-discovery=true
-dhfs.peerdiscovery.timeout=10000
-quarkus.log.category."com.usatiuk".min-level=TRACE
-quarkus.log.category."com.usatiuk".level=TRACE
-quarkus.http.insecure-requests=enabled
-quarkus.http.ssl.client-auth=required
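A quick reading of the chunking knobs above, as they are consumed by the DhfsFileService diff further down (where target_chunk_alignment is used as a power-of-two exponent via alignDown(offset, targetChunkAlignment)); the variable names mirror the @ConfigProperty fields in that class:

    // Sketch: what the deleted defaults work out to.
    int targetChunkAlignment = 17;                   // an exponent, not a byte count
    long alignmentUnit = 1L << targetChunkAlignment; // 131072 B: writes snap to 128 KiB boundaries
    int targetChunkSize = 524288;                    // 512 KiB preferred chunk size
    int maxChunkSize = 524288;                       // hard upper bound per chunk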
Deleted file (name lost in the mirror; com.usatiuk.dhfsapp.TempDataProfile, per its package and class declaration):

@@ -1,29 +0,0 @@
-package com.usatiuk.dhfsapp;
-
-import io.quarkus.test.junit.QuarkusTestProfile;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.HashMap;
-import java.util.Map;
-
-abstract public class TempDataProfile implements QuarkusTestProfile {
-    protected void getConfigOverrides(Map<String, String> toPut) {
-    }
-
-    @Override
-    final public Map<String, String> getConfigOverrides() {
-        Path tempDirWithPrefix;
-        try {
-            tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-        var ret = new HashMap<String, String>();
-        ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
-        ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
-        getConfigOverrides(ret);
-        return ret;
-    }
-}
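For context, the class above is a template method: the final getConfigOverrides() builds per-run temp-directory settings and lets subclasses layer extra keys on top. A hypothetical concrete profile (NoFuseProfile is not in the diff; the dhfs.fuse.enabled key is taken from the deleted application.properties above):

    import java.util.Map;

    // Hypothetical example subclass, for illustration only.
    public class NoFuseProfile extends TempDataProfile {
        @Override
        protected void getConfigOverrides(Map<String, String> toPut) {
            // Added on top of the temp-directory overrides built by the base class.
            toPut.put("dhfs.fuse.enabled", "false");
        }
    }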
Deleted file (name lost in the mirror; com.usatiuk.dhfsapp.TestDataCleaner, per its package and class declaration):

@@ -1,44 +0,0 @@
-package com.usatiuk.dhfsapp;
-
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.ShutdownEvent;
-import io.quarkus.runtime.StartupEvent;
-import jakarta.annotation.Priority;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.enterprise.event.Observes;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.util.Objects;
-
-@ApplicationScoped
-public class TestDataCleaner {
-    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
-    String tempDirectory;
-
-    public static void purgeDirectory(File dir) {
-        try {
-            for (File file : Objects.requireNonNull(dir.listFiles())) {
-                if (file.isDirectory())
-                    purgeDirectory(file);
-                file.delete();
-            }
-        } catch (Exception e) {
-            Log.error("Couldn't purge directory " + dir, e);
-        }
-    }
-
-    void init(@Observes @Priority(1) StartupEvent event) throws IOException {
-        try {
-            purgeDirectory(Path.of(tempDirectory).toFile());
-        } catch (Exception ignored) {
-            Log.warn("Couldn't cleanup test data on init");
-        }
-    }
-
-    void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
-        purgeDirectory(Path.of(tempDirectory).toFile());
-    }
-}
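The recursive purgeDirectory above deletes children before their parent and keeps the root directory itself. A minimal NIO-based equivalent, shown only for comparison (same best-effort semantics; not part of the diff):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Comparator;

    class PurgeNio {
        // Deletes everything under dir, deepest entries first; dir itself is kept,
        // matching the semantics of TestDataCleaner.purgeDirectory above.
        static void purge(Path dir) throws IOException {
            try (var walk = Files.walk(dir)) {
                walk.sorted(Comparator.reverseOrder())
                    .filter(p -> !p.equals(dir))
                    .forEach(p -> p.toFile().delete()); // best-effort, failures ignored
            }
        }
    }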
Deleted file (name lost in the mirror; a test application.properties):

@@ -1,11 +0,0 @@
-dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
-dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
-dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
-dhfs.objects.ref_verification=true
-dhfs.objects.deletion.delay=0
-quarkus.log.category."com.usatiuk.dhfs".level=TRACE
-quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
-quarkus.http.test-port=0
-quarkus.http.test-ssl-port=0
-dhfs.local-discovery=false
-dhfs.objects.persistence.snapshot-extra-checks=true
Modified file (name lost in the mirror; DhfsFileService in com.usatiuk.dhfsfs.service, rewritten from an interface into a CDI bean). The mirrored page is cut off before the end of this hunk, so the tail of both sides is missing.

@@ -2,47 +2,645 @@ package com.usatiuk.dhfsfs.service;

 import com.google.protobuf.ByteString;
 import com.google.protobuf.UnsafeByteOperations;
+import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
+import com.usatiuk.dhfs.jmap.JMapEntry;
+import com.usatiuk.dhfs.jmap.JMapHelper;
+import com.usatiuk.dhfs.jmap.JMapLongKey;
+import com.usatiuk.dhfs.remoteobj.JDataRemote;
+import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
+import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
+import com.usatiuk.dhfsfs.objects.ChunkData;
+import com.usatiuk.dhfsfs.objects.File;
+import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
+import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
+import com.usatiuk.objects.JData;
 import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.objects.iterators.IteratorStart;
+import com.usatiuk.objects.transaction.LockingStrategy;
+import com.usatiuk.objects.transaction.Transaction;
+import com.usatiuk.objects.transaction.TransactionManager;
+import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import io.quarkus.logging.Log;
+import io.quarkus.runtime.StartupEvent;
+import jakarta.annotation.Priority;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.enterprise.event.Observes;
+import jakarta.inject.Inject;
 import org.apache.commons.lang3.tuple.Pair;
+import org.eclipse.microprofile.config.inject.ConfigProperty;

-import java.util.Optional;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.stream.StreamSupport;
-public interface DhfsFileService {
-    Optional<JObjectKey> open(String name);
-
-    Optional<JObjectKey> create(String name, long mode);
-
-    Pair<String, JObjectKey> inoToParent(JObjectKey ino);
-
-    void mkdir(String name, long mode);
-
-    Optional<GetattrRes> getattr(JObjectKey name);
-
-    Boolean chmod(JObjectKey name, long mode);
-
-    void unlink(String name);
-
-    Boolean rename(String from, String to);
-
-    Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs);
-
-    Iterable<String> readDir(String name);
-
-    long size(JObjectKey fileUuid);
-
-    ByteString read(JObjectKey fileUuid, long offset, int length);
-
-    Long write(JObjectKey fileUuid, long offset, ByteString data);
-
-    default Long write(JObjectKey fileUuid, long offset, byte[] data) {
-        return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
-    }
-
-    Boolean truncate(JObjectKey fileUuid, long length);
-
-    String readlink(JObjectKey uuid);
-
-    ByteString readlinkBS(JObjectKey uuid);
-
-    JObjectKey symlink(String oldpath, String newpath);
+@ApplicationScoped
+public class DhfsFileService {
+    @ConfigProperty(name = "dhfs.files.target_chunk_alignment")
+    int targetChunkAlignment;
+    @ConfigProperty(name = "dhfs.files.target_chunk_size")
+    int targetChunkSize;
+    @ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
+    int maxChunkSize;
+    @ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
+    boolean useHashForChunks;
+    @ConfigProperty(name = "dhfs.files.allow_recursive_delete")
+    boolean allowRecursiveDelete;
+    @ConfigProperty(name = "dhfs.objects.ref_verification")
+    boolean refVerification;
+    @ConfigProperty(name = "dhfs.objects.write_log")
+    boolean writeLogging;
+
+    @Inject
+    Transaction curTx;
+    @Inject
+    RemoteTransaction remoteTx;
+    @Inject
+    TransactionManager jObjectTxManager;
+    @Inject
+    JKleppmannTreeManager jKleppmannTreeManager;
+    @Inject
+    JMapHelper jMapHelper;
+
+    private JKleppmannTreeManager.JKleppmannTree getTreeW() {
+        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
+    }
+
+    private JKleppmannTreeManager.JKleppmannTree getTreeR() {
+        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC, () -> new JKleppmannTreeNodeMetaDirectory(""));
+    }
+
+    private ChunkData createChunk(ByteString bytes) {
+        var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
+        remoteTx.putDataNew(newChunk);
+        return newChunk;
+    }
+
+    void init(@Observes @Priority(500) StartupEvent event) {
+        Log.info("Initializing file service");
+        getTreeW();
+    }
+
+    private JKleppmannTreeNode getDirEntryW(String name) {
+        var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
+        if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
+        var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
+        return ret;
+    }
+
+    private JKleppmannTreeNode getDirEntryR(String name) {
+        var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
+        if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
+        var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
+        return ret;
+    }
+
+    private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
+        var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
+        if (res == null) return Optional.empty();
+        var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
+        return ret;
+    }
+    public Optional<GetattrRes> getattr(JObjectKey uuid) {
+        return jObjectTxManager.executeTx(() -> {
+            var ref = curTx.get(JData.class, uuid).orElse(null);
+            if (ref == null) return Optional.empty();
+            GetattrRes ret;
+            if (ref instanceof RemoteObjectMeta r) {
+                var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
+                if (remote instanceof File f) {
+                    ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? GetattrType.SYMLINK : GetattrType.FILE);
+                } else {
+                    throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
+                }
+            } else if (ref instanceof JKleppmannTreeNodeHolder) {
+                ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
+            } else {
+                throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
+            }
+            return Optional.of(ret);
+        });
+    }
+
+    public Optional<JObjectKey> open(String name) {
+        return jObjectTxManager.executeTx(() -> {
+            try {
+                var ret = getDirEntryR(name);
+                return switch (ret.meta()) {
+                    case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
+                    case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
+                    default -> Optional.empty();
+                };
+            } catch (StatusRuntimeException e) {
+                if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
+                    return Optional.empty();
+                }
+                throw e;
+            }
+        });
+    }
+
+    private void ensureDir(JKleppmannTreeNode entry) {
+        if (!(entry.meta() instanceof JKleppmannTreeNodeMetaDirectory))
+            throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
+    }
+
+    public Optional<JObjectKey> create(String name, long mode) {
+        return jObjectTxManager.executeTx(() -> {
+            Path path = Path.of(name);
+            var parent = getDirEntryW(path.getParent().toString());
+
+            ensureDir(parent);
+
+            String fname = path.getFileName().toString();
+
+            var fuuid = UUID.randomUUID();
+            Log.debug("Creating file " + fuuid);
+            File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false);
+            remoteTx.putData(f);
+
+            try {
+                getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
+            } catch (Exception e) {
+//                fobj.getMeta().removeRef(newNodeId);
+                throw e;
+            }
+            return Optional.of(f.key());
+        });
+    }
+
+    //FIXME: Slow..
+    public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
+        return jObjectTxManager.executeTx(() -> {
+            return getTreeW().findParent(w -> {
+                if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
+                    return f.fileIno().equals(ino);
+                return false;
+            });
+        });
+    }
+
+    public void mkdir(String name, long mode) {
+        jObjectTxManager.executeTx(() -> {
+            Path path = Path.of(name);
+            var parent = getDirEntryW(path.getParent().toString());
+            ensureDir(parent);
+
+            String dname = path.getFileName().toString();
+
+            Log.debug("Creating directory " + name);
+
+            getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
+        });
+    }
+
+    public void unlink(String name) {
+        jObjectTxManager.executeTx(() -> {
+            var node = getDirEntryOpt(name).orElse(null);
+            if (node == null)
+                throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
+            if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
+                if (!allowRecursiveDelete && !node.children().isEmpty())
+                    throw new DirectoryNotEmptyException();
+            }
+            getTreeW().trash(node.meta(), node.key());
+        });
+    }
+
+    public Boolean rename(String from, String to) {
+        return jObjectTxManager.executeTx(() -> {
+            var node = getDirEntryW(from);
+            JKleppmannTreeNodeMeta meta = node.meta();
+
+            var toPath = Path.of(to);
+            var toDentry = getDirEntryW(toPath.getParent().toString());
+            ensureDir(toDentry);
+
+            getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
+            return true;
+        });
+    }
+
+    public Boolean chmod(JObjectKey uuid, long mode) {
+        return jObjectTxManager.executeTx(() -> {
+            var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
+
+            if (dent instanceof JKleppmannTreeNodeHolder) {
+                return true;
+            } else if (dent instanceof RemoteObjectMeta) {
+                var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
+                if (remote instanceof File f) {
+                    remoteTx.putData(f.withMode(mode).withCurrentMTime());
+                    return true;
+                } else {
+                    throw new IllegalArgumentException(uuid + " is not a file");
+                }
+            } else {
+                throw new IllegalArgumentException(uuid + " is not a file");
+            }
+        });
+    }
+
+    public Iterable<String> readDir(String name) {
+        return jObjectTxManager.executeTx(() -> {
+            var found = getDirEntryW(name);
+
+            if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
+                throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+
+            return found.children().keySet();
+        });
+    }
+
+    public ByteString read(JObjectKey fileUuid, long offset, int length) {
+        return jObjectTxManager.executeTx(() -> {
+            if (length < 0)
+                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
+            if (offset < 0)
+                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
+
+            var file = remoteTx.getData(File.class, fileUuid).orElse(null);
+            if (file == null) {
+                Log.error("File not found when trying to read: " + fileUuid);
+                throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
+            }
+
+            try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
+                if (!it.hasNext())
+                    return ByteString.empty();
+
+//                if (it.peekNextKey().key() != offset) {
+//                    Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
+//                    return Optional.of(ByteString.empty());
+//                }
+                long curPos = offset;
+                ByteString buf = ByteString.empty();
+
+                var chunk = it.next();
+
+                while (curPos < offset + length) {
+                    var chunkPos = chunk.getKey().key();
+
+                    long offInChunk = curPos - chunkPos;
+
+                    long toReadInChunk = (offset + length) - curPos;
+
+                    var chunkBytes = readChunk(chunk.getValue().ref());
+
+                    long readableLen = chunkBytes.size() - offInChunk;
+
+                    var toReadReally = Math.min(readableLen, toReadInChunk);
+
+                    if (toReadReally < 0) break;
+
+                    buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));
+
+                    curPos += toReadReally;
+
+                    if (readableLen > toReadInChunk)
+                        break;
+
+                    if (!it.hasNext()) break;
+
+                    chunk = it.next();
+                }
+
+                return buf;
+            } catch (Exception e) {
+                Log.error("Error reading file: " + fileUuid, e);
+                throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
+            }
+        });
+    }
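To make the read loop arithmetic concrete, a worked trace under assumed chunk layout (hypothetical numbers, not from the diff):

    // Assume chunks keyed at offsets 0 and 131072, each 131072 bytes long,
    // and a call read(file, offset=131000, length=200).
    //   Iterator starts at LE(131000) -> chunk@0:
    //     offInChunk = 131000, toReadInChunk = 200, readableLen = 72
    //     -> copy chunkBytes[131000..131072), 72 bytes; curPos = 131072
    //   Next, chunk@131072:
    //     offInChunk = 0, toReadInChunk = 128, readableLen = 131072
    //     -> copy chunkBytes[0..128), 128 bytes; curPos = 131200
    //     readableLen > toReadInChunk -> break
    // Result: 200 contiguous bytes spanning the chunk boundary.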
+    private ByteString readChunk(JObjectKey uuid) {
+        var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);
+
+        if (chunkRead == null) {
+            Log.error("Chunk requested not found: " + uuid);
+            throw new StatusRuntimeException(Status.NOT_FOUND);
+        }
+
+        return chunkRead.data();
+    }
+
+    private int getChunkSize(JObjectKey uuid) {
+        return readChunk(uuid).size();
+    }
+
+    private long alignDown(long num, long n) {
+        return num & -(1L << n);
+    }
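alignDown rounds down to a multiple of 2^n by masking off the low n bits: -(1L << n) is a mask whose low n bits are zero. A quick worked check (assumes the alignDown above; the 17 is the target_chunk_alignment default from the deleted application.properties):

    // -(1L << 17) == 0xFFFF_FFFF_FFFE_0000, so the low 17 bits are cleared:
    assert alignDown(300_000L, 17) == 262_144L; // rounded down to a 128 KiB boundary
    assert alignDown(131_072L, 17) == 131_072L; // already aligned, unchanged
    assert alignDown(131_071L, 17) == 0L;       // everything below 2^17 maps to 0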
+    public Long write(JObjectKey fileUuid, long offset, ByteString data) {
+        return jObjectTxManager.executeTx(() -> {
+            if (offset < 0)
+                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
+
+            var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
+            if (file == null) {
+                throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
+            }
+
+            Map<Long, JObjectKey> removedChunks = new HashMap<>();
+
+            long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
+            long writeEnd = offset + data.size();
+            long start = realOffset;
+            long existingEnd = 0;
+            ByteString pendingPrefix = ByteString.empty();
+            ByteString pendingSuffix = ByteString.empty();
+
+            try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
+                while (it.hasNext()) {
+                    var curEntry = it.next();
+                    long curChunkStart = curEntry.getKey().key();
+                    var curChunkId = curEntry.getValue().ref();
+                    long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
+                    existingEnd = curChunkEnd;
+                    if (curChunkEnd <= realOffset) break;
+
+                    removedChunks.put(curEntry.getKey().key(), curChunkId);
+
+                    if (curChunkStart < offset) {
+                        if (curChunkStart < start)
+                            start = curChunkStart;
+
+                        var readChunk = readChunk(curChunkId);
+                        pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
+                    }
+
+                    if (curChunkEnd > writeEnd) {
+                        var readChunk = readChunk(curChunkId);
+                        pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
+                    }
+
+                    if (curChunkEnd >= writeEnd) break;
+                }
+            }
+
+            Map<Long, JObjectKey> newChunks = new HashMap<>();
+
+            if (existingEnd < offset) {
+                if (!pendingPrefix.isEmpty()) {
+                    int diff = Math.toIntExact(offset - existingEnd);
+                    pendingPrefix = pendingPrefix.concat(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(diff)));
+                } else {
+                    fillZeros(existingEnd, offset, newChunks);
+                    start = offset;
+                }
+            }
+
+            ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);
+
+            int combinedSize = pendingWrites.size();
+
+            {
+                int cur = 0;
+                while (cur < combinedSize) {
+                    int end;
+
+                    if (combinedSize - cur < maxChunkSize)
+                        end = combinedSize;
+                    else if (targetChunkAlignment < 0)
+                        end = combinedSize;
+                    else
+                        end = Math.min(cur + targetChunkSize, combinedSize);
+
+                    var thisChunk = pendingWrites.substring(cur, end);
+
+                    ChunkData newChunkData = createChunk(thisChunk);
+                    newChunks.put(start, newChunkData.key());
+
+                    start += thisChunk.size();
+                    cur = end;
+                }
+            }
+
+            for (var e : removedChunks.entrySet()) {
+//                Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
+                jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
+            }
+
+            for (var e : newChunks.entrySet()) {
+//                Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
+                jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
+            }
+
+            remoteTx.putData(file.withCurrentMTime());
+
+            return (long) data.size();
+        });
+    }
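The overall shape of the update performed by write(), summarized from the code above (illustration only):

    // existing chunks:  [ C1 ][ C2 ][ C3 ]
    // incoming write:        [==== W ====]
    // 1. realOffset = alignDown(offset, targetChunkAlignment) when alignment >= 0;
    // 2. every chunk overlapping [realOffset, writeEnd) is collected into removedChunks;
    // 3. pendingPrefix = bytes of the first overlapped chunk before `offset`,
    //    pendingSuffix = bytes of the last overlapped chunk past `writeEnd`,
    //    with zero-fill (or fillZeros) covering any gap past the old end of file;
    // 4. pendingPrefix ++ data ++ pendingSuffix is re-cut into chunks of at most
    //    maxChunkSize starting at `start`, and the chunk-map entries are swapped.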
|
public Boolean truncate(JObjectKey fileUuid, long length) {
|
||||||
|
return jObjectTxManager.executeTx(() -> {
|
||||||
|
if (length < 0)
|
||||||
|
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
|
||||||
|
|
||||||
|
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
|
||||||
|
if (file == null) {
|
||||||
|
Log.error("File not found when trying to write: " + fileUuid);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (length == 0) {
|
||||||
|
jMapHelper.deleteAll(file);
|
||||||
|
remoteTx.putData(file);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
var curSize = size(fileUuid);
|
||||||
|
if (curSize == length) return true;
|
||||||
|
|
||||||
|
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
|
||||||
|
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
|
||||||
|
|
||||||
|
if (curSize < length) {
|
||||||
|
fillZeros(curSize, length, newChunks);
|
||||||
|
} else {
|
||||||
|
// Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
|
||||||
|
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
|
||||||
|
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(length))) {
|
||||||
|
last = it.hasNext() ? it.next() : null;
|
||||||
|
while (it.hasNext()) {
|
||||||
|
var next = it.next();
|
||||||
|
removedChunks.put(next.getKey().key(), next.getValue().ref());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
removedChunks.put(last.getKey().key(), last.getValue().ref());
|
||||||
|
//
|
||||||
|
// NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
|
||||||
|
//
|
||||||
|
// long start = 0;
|
||||||
|
//
|
||||||
|
// try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
|
||||||
|
// first = it.hasNext() ? it.next() : null;
|
||||||
|
// boolean empty = last == null;
|
||||||
|
// if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
|
||||||
|
// first = null;
|
||||||
|
// last = null;
|
||||||
|
// start = offset;
|
||||||
|
// } else if (!empty) {
|
||||||
|
// assert first != null;
|
||||||
|
// removedChunks.put(first.getKey().key(), first.getValue().ref());
|
||||||
|
// while (it.hasNext() && it.peekNextKey() != last.getKey()) {
|
||||||
|
// var next = it.next();
|
||||||
|
// removedChunks.put(next.getKey().key(), next.getValue().ref());
|
||||||
|
// }
|
||||||
|
// removedChunks.put(last.getKey().key(), last.getValue().ref());
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// var tail = chunksAll.lowerEntry(length);
|
||||||
|
// var afterTail = chunksAll.tailMap(tail.getKey(), false);
|
||||||
|
//
|
||||||
|
// removedChunks.put(tail.getKey(), tail.getValue());
|
||||||
|
// removedChunks.putAll(afterTail);
|
||||||
|
|
||||||
|
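                // Shorten the chunk that straddles the new end: re-create it with
                // only the bytes up to the requested length, at the same start offset.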
                var tailBytes = readChunk(last.getValue().ref());
                var newChunk = tailBytes.substring(0, (int) (length - last.getKey().key()));

                ChunkData newChunkData = createChunk(newChunk);
                newChunks.put(last.getKey().key(), newChunkData.key());
            }

            // file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());

            for (var e : removedChunks.entrySet()) {
                // Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
                jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
            }

            for (var e : newChunks.entrySet()) {
                // Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
                jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
            }

            remoteTx.putData(file.withCurrentMTime());
            return true;
        });
    }

    private void fillZeros(long fillStart, long length, Map<Long, JObjectKey> newChunks) {
        long combinedSize = (length - fillStart);

        long start = fillStart;

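        // Zero spans are cut into target-sized chunks; chunks of the same size
        // are reused via zeroCache instead of being allocated again.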
        // Hack
        HashMap<Long, ChunkData> zeroCache = new HashMap<>();

        {
            long cur = 0;
            while (cur < combinedSize) {
                long end;

                if (targetChunkSize <= 0)
                    end = combinedSize;
                else {
                    if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
                        end = cur + targetChunkSize;
                    } else {
                        end = combinedSize;
                    }
                }

                if (!zeroCache.containsKey(end - cur))
                    zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(Math.toIntExact(end - cur)))));

                ChunkData newChunkData = zeroCache.get(end - cur);
                newChunks.put(start, newChunkData.key());

                start += newChunkData.data().size();
                cur = end;
            }
        }
    }

    public String readlink(JObjectKey uuid) {
        return jObjectTxManager.executeTx(() -> {
            return readlinkBS(uuid).toStringUtf8();
        });
    }

    public ByteString readlinkBS(JObjectKey uuid) {
        return jObjectTxManager.executeTx(() -> {
            var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
            return read(uuid, 0, Math.toIntExact(size(uuid)));
        });
    }

    public JObjectKey symlink(String oldpath, String newpath) {
        return jObjectTxManager.executeTx(() -> {
            Path path = Path.of(newpath);
            var parent = getDirEntryW(path.getParent().toString());

            ensureDir(parent);

            String fname = path.getFileName().toString();

            var fuuid = UUID.randomUUID();
            Log.debug("Creating file " + fuuid);

            ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
            File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true);
            jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());

            remoteTx.putData(f);
            getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
            return f.key();
        });
    }

    public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
        return jObjectTxManager.executeTx(() -> {
            var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

            // FIXME:
            if (dent instanceof JKleppmannTreeNodeHolder) {
                return true;
            } else if (dent instanceof RemoteObjectMeta) {
                var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
                if (remote instanceof File f) {
                    remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
                    return true;
                } else {
                    throw new IllegalArgumentException(fileUuid + " is not a file");
                }
            } else {
                throw new IllegalArgumentException(fileUuid + " is not a file");
            }
        });
    }

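    // The file size is not stored separately: it is derived from the chunk map
    // as the start offset of the last chunk plus that chunk's length.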
    public long size(JObjectKey fileUuid) {
        return jObjectTxManager.executeTx(() -> {
            long realSize = 0;
            var file = remoteTx.getData(File.class, fileUuid)
                    .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));

            Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
            try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) {
                last = it.hasNext() ? it.next() : null;
            }

            if (last != null) {
                realSize = last.getKey().key() + getChunkSize(last.getValue().ref());
            }

            return realSize;
        });
    }

    public Long write(JObjectKey fileUuid, long offset, byte[] data) {
        return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
    }
}

@@ -1,667 +0,0 @@
package com.usatiuk.dhfsfs.service;

import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.ChunkData;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.*;
import java.util.stream.StreamSupport;

@ApplicationScoped
public class DhfsFileServiceImpl implements DhfsFileService {
    @Inject
    Transaction curTx;
    @Inject
    RemoteTransaction remoteTx;
    @Inject
    TransactionManager jObjectTxManager;

    @ConfigProperty(name = "dhfs.files.target_chunk_alignment")
    int targetChunkAlignment;

    @ConfigProperty(name = "dhfs.files.target_chunk_size")
    int targetChunkSize;

    @ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
    int maxChunkSize;

    @ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
    boolean useHashForChunks;

    @ConfigProperty(name = "dhfs.files.allow_recursive_delete")
    boolean allowRecursiveDelete;

    @ConfigProperty(name = "dhfs.objects.ref_verification")
    boolean refVerification;

    @ConfigProperty(name = "dhfs.objects.write_log")
    boolean writeLogging;

    @Inject
    JKleppmannTreeManager jKleppmannTreeManager;

    @Inject
    JMapHelper jMapHelper;

    private JKleppmannTreeManager.JKleppmannTree getTreeW() {
        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
    }

    private JKleppmannTreeManager.JKleppmannTree getTreeR() {
        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC, () -> new JKleppmannTreeNodeMetaDirectory(""));
    }

    private ChunkData createChunk(ByteString bytes) {
        var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
        remoteTx.putDataNew(newChunk);
        return newChunk;
    }

    void init(@Observes @Priority(500) StartupEvent event) {
        Log.info("Initializing file service");
        getTreeW();
    }

    private JKleppmannTreeNode getDirEntryW(String name) {
        var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
        if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
        var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
        return ret;
    }

    private JKleppmannTreeNode getDirEntryR(String name) {
        var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
        if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
        var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
        return ret;
    }

    private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
        var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
        if (res == null) return Optional.empty();
        var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
        return ret;
    }

    @Override
    public Optional<GetattrRes> getattr(JObjectKey uuid) {
        return jObjectTxManager.executeTx(() -> {
            var ref = curTx.get(JData.class, uuid).orElse(null);
            if (ref == null) return Optional.empty();
            GetattrRes ret;
            if (ref instanceof RemoteObjectMeta r) {
                var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
                if (remote instanceof File f) {
                    ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? GetattrType.SYMLINK : GetattrType.FILE);
                } else {
                    throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
                }
            } else if (ref instanceof JKleppmannTreeNodeHolder) {
                ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
            } else {
                throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
            }
            return Optional.of(ret);
        });
    }

    @Override
    public Optional<JObjectKey> open(String name) {
        return jObjectTxManager.executeTx(() -> {
            try {
                var ret = getDirEntryR(name);
                return switch (ret.meta()) {
                    case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
                    case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
                    default -> Optional.empty();
                };
            } catch (StatusRuntimeException e) {
                if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
                    return Optional.empty();
                }
                throw e;
            }
        });
    }

    private void ensureDir(JKleppmannTreeNode entry) {
        if (!(entry.meta() instanceof JKleppmannTreeNodeMetaDirectory))
            throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
    }

    @Override
    public Optional<JObjectKey> create(String name, long mode) {
        return jObjectTxManager.executeTx(() -> {
            Path path = Path.of(name);
            var parent = getDirEntryW(path.getParent().toString());

            ensureDir(parent);

            String fname = path.getFileName().toString();

            var fuuid = UUID.randomUUID();
            Log.debug("Creating file " + fuuid);
            File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false);
            remoteTx.putData(f);

            try {
                getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
            } catch (Exception e) {
                // fobj.getMeta().removeRef(newNodeId);
                throw e;
            }
            return Optional.of(f.key());
        });
    }

    //FIXME: Slow..
    @Override
    public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
        return jObjectTxManager.executeTx(() -> {
            return getTreeW().findParent(w -> {
                if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
                    return f.fileIno().equals(ino);
                return false;
            });
        });
    }

    @Override
    public void mkdir(String name, long mode) {
        jObjectTxManager.executeTx(() -> {
            Path path = Path.of(name);
            var parent = getDirEntryW(path.getParent().toString());
            ensureDir(parent);

            String dname = path.getFileName().toString();

            Log.debug("Creating directory " + name);

            getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
        });
    }

    @Override
    public void unlink(String name) {
        jObjectTxManager.executeTx(() -> {
            var node = getDirEntryOpt(name).orElse(null);
            if (node == null)
                throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
            if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
                if (!allowRecursiveDelete && !node.children().isEmpty())
                    throw new DirectoryNotEmptyException();
            }
            getTreeW().trash(node.meta(), node.key());
        });
    }

    @Override
    public Boolean rename(String from, String to) {
        return jObjectTxManager.executeTx(() -> {
            var node = getDirEntryW(from);
            JKleppmannTreeNodeMeta meta = node.meta();

            var toPath = Path.of(to);
            var toDentry = getDirEntryW(toPath.getParent().toString());
            ensureDir(toDentry);

            getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
            return true;
        });
    }

    @Override
    public Boolean chmod(JObjectKey uuid, long mode) {
        return jObjectTxManager.executeTx(() -> {
            var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

            if (dent instanceof JKleppmannTreeNodeHolder) {
                return true;
            } else if (dent instanceof RemoteObjectMeta) {
                var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
                if (remote instanceof File f) {
                    remoteTx.putData(f.withMode(mode).withCurrentMTime());
                    return true;
                } else {
                    throw new IllegalArgumentException(uuid + " is not a file");
                }
            } else {
                throw new IllegalArgumentException(uuid + " is not a file");
            }
        });
    }

    @Override
    public Iterable<String> readDir(String name) {
        return jObjectTxManager.executeTx(() -> {
            var found = getDirEntryW(name);

            if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT);

            return found.children().keySet();
        });
    }

    @Override
    public ByteString read(JObjectKey fileUuid, long offset, int length) {
        return jObjectTxManager.executeTx(() -> {
            if (length < 0)
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
            if (offset < 0)
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));

            var file = remoteTx.getData(File.class, fileUuid).orElse(null);
            if (file == null) {
                Log.error("File not found when trying to read: " + fileUuid);
                throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
            }

            try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
                if (!it.hasNext())
                    return ByteString.empty();

                // if (it.peekNextKey().key() != offset) {
                //     Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
                //     return Optional.of(ByteString.empty());
                // }
                long curPos = offset;
                ByteString buf = ByteString.empty();

                var chunk = it.next();

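                // Walk the chunks from the one covering the offset, copying the
                // overlapping slice of each until the requested range is filled
                // or the file runs out of chunks.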
                while (curPos < offset + length) {
                    var chunkPos = chunk.getKey().key();

                    long offInChunk = curPos - chunkPos;

                    long toReadInChunk = (offset + length) - curPos;

                    var chunkBytes = readChunk(chunk.getValue().ref());

                    long readableLen = chunkBytes.size() - offInChunk;

                    var toReadReally = Math.min(readableLen, toReadInChunk);

                    if (toReadReally < 0) break;

                    buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));

                    curPos += toReadReally;

                    if (readableLen > toReadInChunk)
                        break;

                    if (!it.hasNext()) break;

                    chunk = it.next();
                }

                return buf;
            } catch (Exception e) {
                Log.error("Error reading file: " + fileUuid, e);
                throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
            }
        });
    }

    private ByteString readChunk(JObjectKey uuid) {
        var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);

        if (chunkRead == null) {
            Log.error("Chunk requested not found: " + uuid);
            throw new StatusRuntimeException(Status.NOT_FOUND);
        }

        return chunkRead.data();
    }

    private int getChunkSize(JObjectKey uuid) {
        return readChunk(uuid).size();
    }

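    // Rounds num down to a multiple of 2^n: the alignment parameter is an
    // exponent (a power of two), not a byte count.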
    private long alignDown(long num, long n) {
        return num & -(1L << n);
    }

    @Override
    public Long write(JObjectKey fileUuid, long offset, ByteString data) {
        return jObjectTxManager.executeTx(() -> {
            if (offset < 0)
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));

            var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
            if (file == null) {
                throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
            }

            Map<Long, JObjectKey> removedChunks = new HashMap<>();

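            // With alignment enabled, widen the write leftwards to the aligned
            // boundary so rewritten chunks keep their alignment.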
            long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
            long writeEnd = offset + data.size();
            long start = realOffset;
            long existingEnd = 0;
            ByteString pendingPrefix = ByteString.empty();
            ByteString pendingSuffix = ByteString.empty();

            try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
                while (it.hasNext()) {
                    var curEntry = it.next();
                    long curChunkStart = curEntry.getKey().key();
                    var curChunkId = curEntry.getValue().ref();
                    long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
                    existingEnd = curChunkEnd;
                    if (curChunkEnd <= realOffset) break;

                    removedChunks.put(curEntry.getKey().key(), curChunkId);

                    if (curChunkStart < offset) {
                        if (curChunkStart < start)
                            start = curChunkStart;

                        var readChunk = readChunk(curChunkId);
                        pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
                    }

                    if (curChunkEnd > writeEnd) {
                        var readChunk = readChunk(curChunkId);
                        pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
                    }

                    if (curChunkEnd >= writeEnd) break;
                }
            }

            Map<Long, JObjectKey> newChunks = new HashMap<>();

            if (existingEnd < offset) {
                if (!pendingPrefix.isEmpty()) {
                    int diff = Math.toIntExact(offset - existingEnd);
                    pendingPrefix = pendingPrefix.concat(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(diff)));
                } else {
                    fillZeros(existingEnd, offset, newChunks);
                    start = offset;
                }
            }

            ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);

            int combinedSize = pendingWrites.size();

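            // Split the merged prefix+data+suffix into chunks: remainders below
            // maxChunkSize stay whole, a negative alignment means a single chunk,
            // otherwise cut at targetChunkSize.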
            {
                int cur = 0;
                while (cur < combinedSize) {
                    int end;

                    if (combinedSize - cur < maxChunkSize)
                        end = combinedSize;
                    else if (targetChunkAlignment < 0)
                        end = combinedSize;
                    else
                        end = Math.min(cur + targetChunkSize, combinedSize);

                    var thisChunk = pendingWrites.substring(cur, end);

                    ChunkData newChunkData = createChunk(thisChunk);
                    newChunks.put(start, newChunkData.key());

                    start += thisChunk.size();
                    cur = end;
                }
            }

            for (var e : removedChunks.entrySet()) {
                // Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
                jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
            }

            for (var e : newChunks.entrySet()) {
                // Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
                jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
            }

            remoteTx.putData(file.withCurrentMTime());

            return (long) data.size();
        });
    }

    @Override
    public Boolean truncate(JObjectKey fileUuid, long length) {
        return jObjectTxManager.executeTx(() -> {
            if (length < 0)
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));

            var file = remoteTx.getData(File.class, fileUuid).orElse(null);
            if (file == null) {
                Log.error("File not found when trying to write: " + fileUuid);
                return false;
            }

            if (length == 0) {
                jMapHelper.deleteAll(file);
                remoteTx.putData(file);
                return true;
            }

            var curSize = size(fileUuid);
            if (curSize == length) return true;

            NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
            NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();

            if (curSize < length) {
                fillZeros(curSize, length, newChunks);
            } else {
                // Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
                Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
                try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(length))) {
                    last = it.hasNext() ? it.next() : null;
                    while (it.hasNext()) {
                        var next = it.next();
                        removedChunks.put(next.getKey().key(), next.getValue().ref());
                    }
                }
                removedChunks.put(last.getKey().key(), last.getValue().ref());
                //
                // NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
                //
                // long start = 0;
                //
                // try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
                //     first = it.hasNext() ? it.next() : null;
                //     boolean empty = last == null;
                //     if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
                //         first = null;
                //         last = null;
                //         start = offset;
                //     } else if (!empty) {
                //         assert first != null;
                //         removedChunks.put(first.getKey().key(), first.getValue().ref());
                //         while (it.hasNext() && it.peekNextKey() != last.getKey()) {
                //             var next = it.next();
                //             removedChunks.put(next.getKey().key(), next.getValue().ref());
                //         }
                //         removedChunks.put(last.getKey().key(), last.getValue().ref());
                //     }
                // }
                //
                // var tail = chunksAll.lowerEntry(length);
                // var afterTail = chunksAll.tailMap(tail.getKey(), false);
                //
                // removedChunks.put(tail.getKey(), tail.getValue());
                // removedChunks.putAll(afterTail);

                var tailBytes = readChunk(last.getValue().ref());
                var newChunk = tailBytes.substring(0, (int) (length - last.getKey().key()));

                ChunkData newChunkData = createChunk(newChunk);
                newChunks.put(last.getKey().key(), newChunkData.key());
            }

            // file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());

            for (var e : removedChunks.entrySet()) {
                // Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
                jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
            }

            for (var e : newChunks.entrySet()) {
                // Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
                jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
            }

            remoteTx.putData(file.withCurrentMTime());
            return true;
        });
    }

    private void fillZeros(long fillStart, long length, Map<Long, JObjectKey> newChunks) {
        long combinedSize = (length - fillStart);

        long start = fillStart;

        // Hack
        HashMap<Long, ChunkData> zeroCache = new HashMap<>();

        {
            long cur = 0;
            while (cur < combinedSize) {
                long end;

                if (targetChunkSize <= 0)
                    end = combinedSize;
                else {
                    if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
                        end = cur + targetChunkSize;
                    } else {
                        end = combinedSize;
                    }
                }

                if (!zeroCache.containsKey(end - cur))
                    zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(Math.toIntExact(end - cur)))));

                ChunkData newChunkData = zeroCache.get(end - cur);
                newChunks.put(start, newChunkData.key());

                start += newChunkData.data().size();
                cur = end;
            }
        }
    }

    @Override
    public String readlink(JObjectKey uuid) {
        return jObjectTxManager.executeTx(() -> {
            return readlinkBS(uuid).toStringUtf8();
        });
    }

    @Override
    public ByteString readlinkBS(JObjectKey uuid) {
        return jObjectTxManager.executeTx(() -> {
            var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
            return read(uuid, 0, Math.toIntExact(size(uuid)));
        });
    }

    @Override
    public JObjectKey symlink(String oldpath, String newpath) {
        return jObjectTxManager.executeTx(() -> {
            Path path = Path.of(newpath);
            var parent = getDirEntryW(path.getParent().toString());

            ensureDir(parent);

            String fname = path.getFileName().toString();

            var fuuid = UUID.randomUUID();
            Log.debug("Creating file " + fuuid);

            ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
            File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true);
            jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());

            remoteTx.putData(f);
            getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
            return f.key();
        });
    }

    @Override
    public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
        return jObjectTxManager.executeTx(() -> {
            var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

            // FIXME:
            if (dent instanceof JKleppmannTreeNodeHolder) {
                return true;
            } else if (dent instanceof RemoteObjectMeta) {
                var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
                if (remote instanceof File f) {
                    remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
                    return true;
                } else {
                    throw new IllegalArgumentException(fileUuid + " is not a file");
                }
            } else {
                throw new IllegalArgumentException(fileUuid + " is not a file");
            }
        });
    }

    @Override
    public long size(JObjectKey fileUuid) {
        return jObjectTxManager.executeTx(() -> {
            long realSize = 0;
            var file = remoteTx.getData(File.class, fileUuid)
                    .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));

            Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
            try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) {
                last = it.hasNext() ? it.next() : null;
            }

            if (last != null) {
                realSize = last.getKey().key() + getChunkSize(last.getValue().ref());
            }

            return realSize;
        });
    }
}

@@ -30,7 +30,7 @@ public class TestDataCleaner {
         purgeDirectory(Path.of(tempDirectory).toFile());
     }
 
-    void purgeDirectory(File dir) {
+    public void purgeDirectory(File dir) {
         for (File file : Objects.requireNonNull(dir.listFiles())) {
             if (file.isDirectory())
                 purgeDirectory(file);
@@ -139,16 +139,13 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-failsafe-plugin</artifactId>
                 <configuration>
+                    <forkCount>1C</forkCount>
+                    <reuseForks>false</reuseForks>
+                    <parallel>classes</parallel>
                     <systemPropertyVariables>
                         <junit.jupiter.execution.parallel.enabled>
-                            true
+                            false
                         </junit.jupiter.execution.parallel.enabled>
-                        <junit.jupiter.execution.parallel.mode.default>
-                            concurrent
-                        </junit.jupiter.execution.parallel.mode.default>
-                        <junit.jupiter.execution.parallel.config.dynamic.factor>
-                            0.5
-                        </junit.jupiter.execution.parallel.config.dynamic.factor>
                         <junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
                         <junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
                     </systemPropertyVariables>
@@ -23,14 +23,11 @@ import jnr.ffi.Pointer;
 import jnr.ffi.Runtime;
 import jnr.ffi.Struct;
 import jnr.ffi.types.off_t;
-import jnr.ffi.types.size_t;
 import org.apache.commons.lang3.SystemUtils;
 import org.eclipse.microprofile.config.inject.ConfigProperty;
 import ru.serce.jnrfuse.ErrorCodes;
 import ru.serce.jnrfuse.FuseFillDir;
 import ru.serce.jnrfuse.FuseStubFS;
-import ru.serce.jnrfuse.NotImplemented;
-import ru.serce.jnrfuse.flags.FuseBufFlags;
 import ru.serce.jnrfuse.struct.*;
 
 import java.nio.ByteBuffer;
@@ -1,4 +1,4 @@
-package com.usatiuk.dhfsapp;
+package com.usatiuk.dhfsfuse;
 
 import io.quarkus.runtime.Quarkus;
 import io.quarkus.runtime.QuarkusApplication;
@@ -14,8 +14,9 @@ dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
 dhfs.fuse.debug=false
 dhfs.fuse.enabled=true
 dhfs.files.allow_recursive_delete=false
-dhfs.files.target_chunk_size=2097152
-dhfs.files.target_chunk_alignment=19
+dhfs.files.target_chunk_size=524288
+dhfs.files.max_chunk_size=524288
+dhfs.files.target_chunk_alignment=17
 dhfs.objects.deletion.delay=1000
 dhfs.objects.deletion.can-delete-retry-delay=10000
 dhfs.objects.ref_verification=true
@@ -30,7 +30,7 @@ public class TestDataCleaner {
         purgeDirectory(Path.of(tempDirectory).toFile());
     }
 
-    void purgeDirectory(File dir) {
+    public static void purgeDirectory(File dir) {
         for (File file : Objects.requireNonNull(dir.listFiles())) {
             if (file.isDirectory())
                 purgeDirectory(file);
@@ -1,4 +1,4 @@
-package com.usatiuk.dhfsapp.integration;
+package com.usatiuk.dhfsfuse.integration;
 
 import com.github.dockerjava.api.model.Device;
 import io.quarkus.logging.Log;
@@ -1,4 +1,4 @@
-package com.usatiuk.dhfsapp.integration;
+package com.usatiuk.dhfsfuse.integration;
 
 import com.github.dockerjava.api.model.Device;
 import io.quarkus.logging.Log;
@@ -1,4 +1,4 @@
-package com.usatiuk.dhfsapp.integration;
+package com.usatiuk.dhfsfuse.integration;
 
 import io.quarkus.logging.Log;
 import org.jetbrains.annotations.NotNull;
@@ -1,7 +1,7 @@
-package com.usatiuk.dhfsapp.integration;
+package com.usatiuk.dhfsfuse.integration;
 
 import com.github.dockerjava.api.model.Device;
-import com.usatiuk.dhfsapp.TestDataCleaner;
+import com.usatiuk.dhfsfuse.TestDataCleaner;
 import io.quarkus.logging.Log;
 import org.junit.jupiter.api.*;
 import org.slf4j.LoggerFactory;
@@ -1,4 +1,4 @@
-package com.usatiuk.dhfsapp.integration;
+package com.usatiuk.dhfsfuse.integration;
 
 import io.quarkus.logging.Log;
 
@@ -1,7 +1,7 @@
-package com.usatiuk.dhfsapp.integration;
+package com.usatiuk.dhfsfuse.integration;
 
 import com.github.dockerjava.api.model.Device;
-import com.usatiuk.dhfsapp.TestDataCleaner;
+import com.usatiuk.dhfsfuse.TestDataCleaner;
 import io.quarkus.logging.Log;
 import org.junit.jupiter.api.*;
 import org.junit.jupiter.params.ParameterizedTest;
@@ -1,4 +1,4 @@
-package com.usatiuk.dhfsapp.integration;
+package com.usatiuk.dhfsfuse.integration;
 
 import com.github.dockerjava.api.model.Device;
 import org.junit.jupiter.api.*;
@@ -14,7 +14,6 @@
         <module>sync-base</module>
         <module>dhfs-fs</module>
         <module>dhfs-fuse</module>
-        <module>dhfs-app</module>
         <module>kleppmanntree</module>
         <module>objects</module>
         <module>utils</module>