Commits

Commit 5cd0e5f045: iterator flattening (2025-04-19 13:51:28 +02:00)
276 changed files with 5112 additions and 4880 deletions


@@ -20,21 +20,26 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: "recursive"
- name: Install sudo for ACT
run: apt-get update && apt-get install -y sudo
if: env.ACT=='true'
- name: Install FUSE
run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
- name: Install fuse and maven
run: sudo apt-get update && sudo apt-get install -y libfuse2
- name: User allow other for fuse
run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf
- name: Download maven
run: |
cd "$HOME"
mkdir maven-bin
curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
echo "$HOME"/maven-bin/bin >> $GITHUB_PATH
- name: Dump fuse.conf
run: cat /etc/fuse.conf
- name: Maven info
run: |
echo $GITHUB_PATH
echo $PATH
mvn -v
- name: Set up JDK 21
uses: actions/setup-java@v4
@@ -43,11 +48,8 @@ jobs:
distribution: "zulu"
cache: maven
- name: Build LazyFS
run: cd thirdparty/lazyfs/ && ./build.sh
- name: Test with Maven
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify javadoc:aggregate
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify
# - name: Build with Maven
# run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG
@@ -55,12 +57,7 @@ jobs:
- uses: actions/upload-artifact@v4
with:
name: DHFS Server Package
path: dhfs-parent/dhfs-fuse/target/quarkus-app
- uses: actions/upload-artifact@v4
with:
name: DHFS Javadocs
path: dhfs-parent/target/reports/apidocs/
path: dhfs-parent/dhfs-app/target/quarkus-app
- uses: actions/upload-artifact@v4
if: ${{ always() }}
@@ -217,7 +214,7 @@ jobs:
run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app
- name: Copy DHFS
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"
- name: Copy Webui
run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"
@@ -236,37 +233,3 @@ jobs:
with:
name: Run wrapper
path: ~/run-wrapper.tar.gz
publish-javadoc:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
permissions:
contents: read
pages: write
id-token: write
needs: [build-webui, build-dhfs]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: DHFS Javadocs
path: dhfs-javadocs-downloaded
- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
# Upload entire repository
path: 'dhfs-javadocs-downloaded'
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4

.gitmodules

@@ -1,3 +0,0 @@
[submodule "thirdparty/lazyfs/lazyfs"]
path = thirdparty/lazyfs/lazyfs
url = git@github.com:dsrhaslab/lazyfs.git


@@ -14,9 +14,6 @@ Syncthing and allowing you to stream your files like Google Drive File Stream
This is a simple wrapper around the jar/web ui distribution that allows you to run/stop
the DHFS server in the background, and update itself (hopefully!)
## How to use it?
## How to use it and how it works?
Unpack the run-wrapper and run the `run` script. The filesystem should be mounted to the `fuse` folder in the run-wrapper root directory.
Then, a web interface will be available at `localhost:8080`, which can be used to connect with other peers.
TODO 😁


@@ -41,5 +41,3 @@ nb-configuration.xml
# Plugin directory
/.quarkus/cli/plugins/
.jqwik-database


@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.app.Main" />
<module name="dhfs-app" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:8080:9011" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>


@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.app.Main" />
<module name="dhfs-app" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions --enable-preview -XX:+UseParallelGC -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=8080 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>


@@ -0,0 +1,5 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*

dhfs-parent/dhfs-app/.gitignore (new file)

@@ -0,0 +1,43 @@
#Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml
# Eclipse
.project
.classpath
.settings/
bin/
# IntelliJ
.idea
*.ipr
*.iml
*.iws
# NetBeans
nb-configuration.xml
# Visual Studio Code
.vscode
.factorypath
# OSX
.DS_Store
# Vim
*.swp
*.swo
# patch
*.orig
*.rej
# Local environment
.env
# Plugin directory
/.quarkus/cli/plugins/


@@ -0,0 +1,2 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl


@@ -0,0 +1,43 @@
version: "3.2"
services:
dhfs1:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs1:/dhfs_root
- $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-jar /app/quarkus-run.jar"
ports:
- 8080:8080
- 8081:8443
- 5005:5005
dhfs2:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs2:/dhfs_root
- $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
--add-exports java.base/jdk.internal.access=ALL-UNNAMED
--add-opens=java.base/java.nio=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-jar /app/quarkus-run.jar"
ports:
- 8090:8080
- 8091:8443
- 5010:5010


@@ -0,0 +1,210 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-app</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>net.openhft</groupId>
<artifactId>zero-allocation-hashing</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.SerCeMan</groupId>
<artifactId>jnr-fuse</artifactId>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>kleppmanntree</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>objects</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fuse</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
true
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
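The two test plugins above are configured differently on purpose: surefire (unit tests) forks one JVM per core (`forkCount=1C`, `reuseForks=false`) with JUnit's own parallelism disabled, while failsafe (integration tests) turns on JUnit 5 parallel execution with a dynamic factor of 0.5, roughly one worker per two cores. A minimal sketch of what the failsafe settings mean for a test class, assuming stock JUnit 5 (the class is hypothetical, not from this repo):

```java
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.parallel.Execution;
import org.junit.jupiter.api.parallel.ExecutionMode;

// With junit.jupiter.execution.parallel.enabled=true and the default mode set
// to "concurrent", top-level test classes run concurrently on a shared pool.
// An individual class can still opt out:
@Execution(ExecutionMode.SAME_THREAD)
class SerialOnlyExampleIT {
    @Test
    void runsWithoutOverlappingItsSiblings() {
        // methods here execute sequentially even under the global
        // concurrent default configured in the failsafe plugin above
    }
}
```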


@@ -0,0 +1,97 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
# "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]


@@ -0,0 +1,93 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
# "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]


@@ -0,0 +1,27 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]


@@ -0,0 +1,30 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]


@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs.app;
import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;


@@ -0,0 +1,34 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.objects.periodic-push-op-interval=5m
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
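These are ordinary MicroProfile Config keys, so Quarkus beans read them by injection (the same mechanism `TestDataCleaner` uses further down). A minimal sketch, assuming standard Quarkus config injection (the bean itself is hypothetical; the keys are taken from the file above):

```java
import jakarta.enterprise.context.ApplicationScoped;
import org.eclipse.microprofile.config.inject.ConfigProperty;

@ApplicationScoped
public class ChunkingConfigExample {
    // 2097152 bytes (2 MiB) per the application.properties above
    @ConfigProperty(name = "dhfs.files.target_chunk_size")
    int targetChunkSize;

    // whether deleting non-empty directories recursively is permitted
    @ConfigProperty(name = "dhfs.files.allow_recursive_delete")
    boolean allowRecursiveDelete;
}
```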


@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs;
import io.quarkus.test.junit.QuarkusTestProfile;


@@ -0,0 +1,44 @@
package com.usatiuk.dhfs;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
@ApplicationScoped
public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
} catch (Exception ignored) {
Log.warn("Couldn't cleanup test data on init");
}
}
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}
public static void purgeDirectory(File dir) {
try {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
} catch (Exception e) {
Log.error("Couldn't purge directory " + dir, e);
}
}
}
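`purgeDirectory` leans on `File#listFiles`, which returns `null` on I/O failure (hence the `requireNonNull`), and ignores individual `delete()` failures. For comparison only (not part of the commit), a hedged sketch of the same purge with `java.nio.file`:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public final class PurgeNioSketch {
    // Deletes everything under dir but keeps dir itself, like the original.
    public static void purgeDirectory(Path dir) throws IOException {
        try (Stream<Path> walk = Files.walk(dir)) {
            walk.sorted(Comparator.reverseOrder())  // children before parents
                .filter(p -> !p.equals(dir))        // keep the root directory
                .forEach(p -> p.toFile().delete()); // best-effort, as before
        }
    }
}
```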


@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse.integration;
package com.usatiuk.dhfs.integration;
import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -32,11 +32,9 @@ public class DhfsFuseIT {
String c1uuid;
String c2uuid;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
network = Network.newNetwork();
Network network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
@@ -67,39 +65,22 @@ public class DhfsFuseIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
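The peer-registration call changed shape in this commit: the peer UUID moved from the URL path into a JSON body, and every test's curl invocation was updated to match. The same request in plain Java, as a sketch (host, port, and class name are illustrative; the endpoint and payload shape come from the diff):

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

class KnownPeersClientSketch {
    static int addKnownPeer(String uuid) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/peers-manage/known-peers"))
                .header("Content-Type", "application/json")
                // previously: PUT /peers-manage/known-peers/<uuid> with body {}
                .PUT(HttpRequest.BodyPublishers.ofString(
                        "{\"uuid\":\"" + uuid + "\"}"))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        return response.statusCode();
    }
}
```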
private void checkConsistency() {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.equals(ls2) && cat1.equals(cat2);
});
}
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
network.close();
}
@Test
@@ -168,6 +149,35 @@ public class DhfsFuseIT {
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
// TODO: How this fits with the tree?
@Test
@Disabled
void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1);
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}
@Test
void deleteTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
@@ -192,28 +202,6 @@ public class DhfsFuseIT {
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}
@Test
void deleteTestKickedOut() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("kicked"), 60, TimeUnit.SECONDS, 1);
Log.info("Deleting");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
Log.info("Deleted");
// FIXME?
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
await().atMost(45, TimeUnit.SECONDS).until(() ->
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}
@Test
void moveFileTest() throws IOException, InterruptedException, TimeoutException {
Log.info("Creating");
@@ -257,8 +245,8 @@ public class DhfsFuseIT {
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /dhfs_test/fuse/newfile1").getExitCode());
@@ -273,8 +261,8 @@ public class DhfsFuseIT {
container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -318,33 +306,6 @@ public class DhfsFuseIT {
});
}
@Test
void dirConflictTest2() throws IOException, InterruptedException, TimeoutException {
var client = DockerClientFactory.instance().client();
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo fdsaio >> /dhfs_test/fuse/a/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo exgrg >> /dhfs_test/fuse/a/testf").getExitCode());
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
Log.warn("Waiting for connections");
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
Log.warn("Connected");
checkConsistency();
var ls1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/a*/*");
Assertions.assertTrue(ls1.getStdout().contains("fdsaio"));
Assertions.assertTrue(ls1.getStdout().contains("exgrg"));
}
@Test
void dirCycleTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());


@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse.integration;
package com.usatiuk.dhfs.integration;
import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -35,15 +35,13 @@ public class DhfsFusex3IT {
String c2uuid;
String c3uuid;
Network network;
// This calculation is somewhat racy, so keep it hardcoded for now
long emptyFileCount = 9;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
// TODO: Dedup
network = Network.newNetwork();
Network network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
@@ -93,26 +91,26 @@ public class DhfsFusex3IT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
var c2curl1 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
var c2curl3 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c3uuid);
" --data '{\"uuid\":\"" + c3uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
var c3curl = container3.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
@@ -133,7 +131,6 @@ public class DhfsFusex3IT {
@AfterEach
void stop() {
Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::stop);
network.close();
}
@Test
@@ -193,8 +190,8 @@ public class DhfsFusex3IT {
var c3curl = container3.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
Thread.sleep(10000);
@@ -255,22 +252,21 @@ public class DhfsFusex3IT {
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));
var client = DockerClientFactory.instance().client();
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
client.pauseContainerCmd(container1.getContainerId()).exec();
client.pauseContainerCmd(container2.getContainerId()).exec();
// Pauses needed as otherwise docker buffers some incoming packets
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
client.pauseContainerCmd(container3.getContainerId()).exec();
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
client.pauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
client.unpauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container3.getContainerId()).exec();
Log.warn("Waiting for connections");
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);


@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse.integration;
package com.usatiuk.dhfs.integration;
import io.quarkus.logging.Log;
import org.jetbrains.annotations.NotNull;
@@ -66,25 +66,21 @@ public class DhfsImage implements Future<String> {
.run("apt update && apt install -y libfuse2 curl gcc")
.copy("/app", "/app")
.copy("/libs", "/libs")
.cmd("java", "-ea", "-Xmx256M", "-XX:TieredStopAtLevel=1", "-XX:+UseParallelGC",
.cmd("java", "-ea", "-Xmx128M",
"--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED",
"--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED",
"--add-opens=java.base/java.nio=ALL-UNNAMED",
"--enable-preview",
"-Ddhfs.objects.peerdiscovery.interval=1s",
"-Ddhfs.objects.invalidation.delay=100",
"-Ddhfs.objects.deletion.delay=0",
"-Ddhfs.objects.deletion.can-delete-retry-delay=1000",
"-Ddhfs.objects.ref_verification=true",
"-Ddhfs.objects.sync.timeout=30",
"-Ddhfs.objects.write_log=true",
"-Ddhfs.objects.sync.timeout=10",
"-Ddhfs.objects.sync.ping.timeout=5",
"-Ddhfs.objects.reconnect_interval=1s",
"-Ddhfs.objects.last-seen.timeout=30",
"-Ddhfs.objects.last-seen.update=10",
"-Ddhfs.sync.cert-check=false",
"-Dquarkus.log.category.\"com.usatiuk\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.objects.transaction\".level=INFO",
"-Ddhfs.objects.periodic-push-op-interval=5s",
"-Ddhfs.fuse.root=/dhfs_test/fuse",
"-Ddhfs.objects.persistence.files.root=/dhfs_test/data",


@@ -1,7 +1,7 @@
package com.usatiuk.dhfsfuse.integration;
package com.usatiuk.dhfs.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import com.usatiuk.dhfs.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;
@@ -18,7 +18,10 @@ import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
@@ -36,18 +39,12 @@ public class KillIT {
File data1;
File data2;
Network network;
ExecutorService executor;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("").toFile();
data2 = Files.createTempDirectory("").toFile();
network = Network.newNetwork();
Network network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
@@ -81,14 +78,14 @@ public class KillIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -99,28 +96,11 @@ public class KillIT {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data2);
executor.close();
network.close();
}
private void checkConsistency() {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
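`checkConsistency` is built on Awaitility polling: the `ls`/`cat` comparison is retried until both replicas agree or the 45-second budget runs out, which absorbs transient divergence while sync catches up. The idiom in isolation, as a sketch (the condition is a stand-in for the container comparison):

```java
import static org.awaitility.Awaitility.await;

import java.util.concurrent.TimeUnit;

class AwaitIdiomSketch {
    static boolean replicasAgree() {
        return true; // stand-in for comparing ls/cat output of both containers
    }

    public static void main(String[] args) {
        await().atMost(45, TimeUnit.SECONDS)           // overall deadline
               .until(AwaitIdiomSketch::replicasAgree); // polled until true
    }
}
```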
@Test
void killTest(TestInfo testInfo) throws Exception {
var executor = Executors.newFixedThreadPool(2);
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
@@ -144,11 +124,24 @@ public class KillIT {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.equals(ls2) && cat1.equals(cat2);
});
}
@Test
void killTestDirs(TestInfo testInfo) throws Exception {
var executor = Executors.newFixedThreadPool(2);
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
@@ -172,64 +165,18 @@ public class KillIT {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.equals(ls2) && cat1.equals(cat2);
});
@Test
void killTest2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTestDirs2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
}
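All four kill tests share one coordination pattern: a writer task loops inside the container while a `CyclicBarrier` lines its start up with the main thread, which then kills and restarts the peer mid-write before checking consistency. A condensed sketch of just that pattern (names illustrative):

```java
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class KillPatternSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newCachedThreadPool();
        CyclicBarrier barrier = new CyclicBarrier(2);
        var writer = executor.submit(() -> {
            try {
                barrier.await();  // start writing only when the test is ready
                // ... write continuously until signalled to stop ...
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();          // both parties pass the barrier together
        // ... kill the container here, restart it, then verify consistency ...
        writer.cancel(true);
        executor.shutdownNow();
    }
}
```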


@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse.integration;
package com.usatiuk.dhfs.integration;
import com.github.dockerjava.api.model.Device;
import org.junit.jupiter.api.*;
@@ -29,11 +29,9 @@ public class ResyncIT {
String c1uuid;
String c2uuid;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
network = Network.newNetwork();
Network network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
@@ -57,7 +55,6 @@ public class ResyncIT {
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
network.close();
}
@Test
@@ -75,14 +72,14 @@ public class ResyncIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -115,14 +112,14 @@ public class ResyncIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -155,14 +152,14 @@ public class ResyncIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
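
The hunks above also capture the peer-registration API change: the peer UUID moves from the URL path into the JSON body of a PUT to /peers-manage/known-peers. The same call from Java, sketched with the JDK HTTP client (host, port, and UUID are placeholders):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

class KnownPeersPut {
    public static void main(String[] args) throws Exception {
        String peerUuid = "00000000-0000-0000-0000-000000000000"; // placeholder
        HttpRequest req = HttpRequest.newBuilder(URI.create("http://localhost:8080/peers-manage/known-peers"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"uuid\":\"" + peerUuid + "\"}"))
                .build();
        // Equivalent to the curl invocation the tests run inside the container.
        System.out.println(HttpClient.newHttpClient().send(req, HttpResponse.BodyHandlers.ofString()).statusCode());
    }
}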

View File

@@ -0,0 +1,11 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true
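
This test application.properties can also be expressed programmatically: Quarkus test profiles override configuration per test class, which is presumably how the TempDataProfile referenced elsewhere in this diff works. A minimal sketch (the profile class name and the overridden values are illustrative):

import io.quarkus.test.junit.QuarkusTestProfile;
import java.util.Map;

public class TempDirProfile implements QuarkusTestProfile {
    @Override
    public Map<String, String> getConfigOverrides() {
        // Point the object store at a throwaway directory and disable discovery.
        return Map.of(
                "dhfs.objects.persistence.files.root", System.getProperty("java.io.tmpdir") + "/dhfs_root_test",
                "dhfs.local-discovery", "false");
    }
}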

View File

@@ -72,6 +72,26 @@
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.SerCeMan</groupId>
<artifactId>jnr-fuse</artifactId>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
@@ -102,11 +122,26 @@
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>kleppmanntree</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>objects</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>

View File

@@ -0,0 +1,13 @@
package com.usatiuk.dhfs.files.objects;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
@Override
public int estimateSize() {
return data.size();
}
}

View File

@@ -0,0 +1,26 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.persistence.ChunkDataP;
import com.usatiuk.dhfs.persistence.JObjectKeyP;
import jakarta.inject.Singleton;
@Singleton
public class ChunkDataProtoSerializer implements ProtoSerializer<ChunkDataP, ChunkData> {
@Override
public ChunkData deserialize(ChunkDataP message) {
return new ChunkData(
JObjectKey.of(message.getKey().getName()),
message.getData()
);
}
@Override
public ChunkDataP serialize(ChunkData object) {
return ChunkDataP.newBuilder()
.setKey(JObjectKeyP.newBuilder().setName(object.key().value()).build())
.setData(object.data())
.build();
}
}
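
A round trip through this serializer is a one-liner in each direction; a sketch assuming a CDI-injected ChunkDataProtoSerializer (the key string is arbitrary):

import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;

ChunkData chunk = new ChunkData(JObjectKey.of("chunk-1"), ByteString.copyFromUtf8("hello"));
ChunkDataP wire = serializer.serialize(chunk);   // key plus raw bytes on the wire
ChunkData back = serializer.deserialize(wire);
assert back.equals(chunk);                       // records compare by components
assert chunk.estimateSize() == 5;                // estimateSize() is just the payload length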

View File

@@ -1,22 +1,14 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.jmap.JMapHolder;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;
import java.util.Collection;
import java.util.Set;
/**
* File is a data structure that represents a file in the file system
* @param key unique key
* @param mode file mode
* @param cTime creation time
* @param mTime modification time
* @param symlink true if the file is a symlink, false otherwise
*/
public record File(JObjectKey key, long mode, long cTime, long mTime,
boolean symlink
) implements JDataRemote, JMapHolder<JMapLongKey> {
@@ -36,10 +28,6 @@ public record File(JObjectKey key, long mode, long cTime, long mTime,
return new File(key, mode, cTime, mTime, symlink);
}
public File withCurrentMTime() {
return new File(key, mode, cTime, System.currentTimeMillis(), symlink);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return Set.of();

View File

@@ -0,0 +1,15 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
@Override
public Class<? extends JDataRemote> objClass() {
return File.class;
}
}

View File

@@ -1,13 +1,10 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.syncmap.DtoMapper;
import com.usatiuk.dhfs.repository.syncmap.DtoMapper;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
/**
* Maps a {@link File} object to a {@link FileDto} object and vice versa.
*/
@ApplicationScoped
public class FileDtoMapper implements DtoMapper<File, FileDto> {
@Inject

View File

@@ -1,8 +1,8 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.objects.JObjectKey;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
@@ -10,20 +10,11 @@ import org.apache.commons.lang3.tuple.Pair;
import java.util.ArrayList;
import java.util.List;
/**
* Helper class for working with files.
*/
@ApplicationScoped
public class FileHelper {
@Inject
JMapHelper jMapHelper;
/**
* Get the chunks of a file.
* Transaction is expected to be already started.
* @param file the file to get chunks from
* @return a list of pairs of chunk offset and chunk key
*/
public List<Pair<Long, JObjectKey>> getChunks(File file) {
ArrayList<Pair<Long, JObjectKey>> chunks = new ArrayList<>();
try (var it = jMapHelper.getIterator(file)) {
@@ -35,13 +26,6 @@ public class FileHelper {
return List.copyOf(chunks);
}
/**
* Replace the chunks of a file.
* All previous chunks will be deleted.
* Transaction is expected to be already started.
* @param file the file to replace chunks in
* @param chunks the list of pairs of chunk offset and chunk key
*/
public void replaceChunks(File file, List<Pair<Long, JObjectKey>> chunks) {
jMapHelper.deleteAll(file);

View File

@@ -0,0 +1,25 @@
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.dhfs.persistence.FileDtoP;
import com.usatiuk.dhfs.utils.SerializationHelper;
import jakarta.inject.Singleton;
import java.io.IOException;
@Singleton
public class FileProtoSerializer implements ProtoSerializer<FileDtoP, FileDto> {
@Override
public FileDto deserialize(FileDtoP message) {
try (var is = message.getSerializedData().newInput()) {
return SerializationHelper.deserialize(is);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public FileDtoP serialize(FileDto object) {
return FileDtoP.newBuilder().setSerializedData(SerializationHelper.serialize(object)).build();
}
}
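
Note the contrast with ChunkDataP: FileDtoP carries a single opaque bytes field, so the proto schema never has to track FileDto's shape. Assuming SerializationHelper wraps plain Java serialization, the serialize side is roughly:

import com.google.protobuf.ByteString;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable;

static ByteString javaSerialize(Serializable obj) throws IOException {
    var baos = new ByteArrayOutputStream();
    try (var oos = new ObjectOutputStream(baos)) {
        oos.writeObject(obj); // FileDto and everything it holds must implement Serializable
    }
    return ByteString.copyFrom(baos.toByteArray());
}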

View File

@@ -1,14 +1,20 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;
import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.RemoteObjectDataWrapper;
import com.usatiuk.dhfs.RemoteObjectMeta;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
import com.usatiuk.dhfs.remoteobj.*;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.dhfs.repository.ObjSyncHandler;
import com.usatiuk.dhfs.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.repository.SyncHelper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -23,9 +29,6 @@ import javax.annotation.Nullable;
import java.util.List;
import java.util.Objects;
/**
* Handles synchronization of file objects.
*/
@ApplicationScoped
public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
@Inject
@@ -44,18 +47,14 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
@Inject
DhfsFileService fileService;
private JKleppmannTreeManager.JKleppmannTree getTree() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs")).orElseThrow();
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
}
/**
* Resolve conflict between two file versions, update the file in storage and create a conflict file.
*
* @param from the peer that sent the update
* @param key the key of the file
* @param receivedChangelog the changelog of the received file
* @param receivedData the received file data
*/
private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
@Nullable FileDto receivedData) {
var oursCurMeta = curTx.get(RemoteObjectMeta.class, key).orElse(null);
@@ -137,12 +136,12 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
do {
try {
getTree().move(parent.getRight(),
getTreeW().move(parent.getRight(),
new JKleppmannTreeNodeMetaFile(
parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + otherHostname.toString() + "." + i,
newFile.key()
),
getTree().getNewNodeId()
getTreeW().getNewNodeId()
);
} catch (AlreadyExistsException aex) {
i++;

View File

@@ -0,0 +1,48 @@
package com.usatiuk.dhfs.files.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;
import java.util.Optional;
public interface DhfsFileService {
Optional<JObjectKey> open(String name);
Optional<JObjectKey> create(String name, long mode);
Pair<String, JObjectKey> inoToParent(JObjectKey ino);
void mkdir(String name, long mode);
Optional<GetattrRes> getattr(JObjectKey name);
Boolean chmod(JObjectKey name, long mode);
void unlink(String name);
Boolean rename(String from, String to);
Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs);
Iterable<String> readDir(String name);
long size(JObjectKey fileUuid);
Optional<ByteString> read(JObjectKey fileUuid, long offset, int length);
Long write(JObjectKey fileUuid, long offset, ByteString data);
default Long write(JObjectKey fileUuid, long offset, byte[] data) {
return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
}
Boolean truncate(JObjectKey fileUuid, long length);
String readlink(JObjectKey uuid);
ByteString readlinkBS(JObjectKey uuid);
JObjectKey symlink(String oldpath, String newpath);
}
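
Read as a whole, the interface is a miniature VFS: path-based operations resolve names to keys, and inode-like operations take a JObjectKey. A typical sequence, sketched (paths and bytes are illustrative; each call runs its own transaction internally):

DhfsFileService fs = ...;                                // CDI-injected in practice
JObjectKey ino = fs.create("/docs/hello.txt", 0644).orElseThrow();
fs.write(ino, 0, "hello".getBytes());                    // byte[] overload wraps without copying
Optional<ByteString> head = fs.read(ino, 0, 5);          // empty Optional if the file is gone
long size = fs.size(ino);                                // 5
fs.rename("/docs/hello.txt", "/docs/hi.txt");
fs.unlink("/docs/hi.txt");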

View File

@@ -1,27 +1,27 @@
package com.usatiuk.dhfsfs.service;
package com.usatiuk.dhfs.files.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.dhfs.RemoteObjectMeta;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.files.objects.ChunkData;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.ChunkData;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -39,77 +39,80 @@ import java.nio.file.Path;
import java.util.*;
import java.util.stream.StreamSupport;
/**
* Actual filesystem implementation.
*/
@ApplicationScoped
public class DhfsFileService {
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
int targetChunkAlignment;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
int maxChunkSize;
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
boolean useHashForChunks;
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
boolean allowRecursiveDelete;
@ConfigProperty(name = "dhfs.objects.ref_verification")
boolean refVerification;
@ConfigProperty(name = "dhfs.objects.write_log")
boolean writeLogging;
public class DhfsFileServiceImpl implements DhfsFileService {
@Inject
Transaction curTx;
@Inject
RemoteTransaction remoteTx;
@Inject
TransactionManager jObjectTxManager;
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
int targetChunkAlignment;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
boolean useHashForChunks;
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
boolean allowRecursiveDelete;
@ConfigProperty(name = "dhfs.objects.ref_verification")
boolean refVerification;
@ConfigProperty(name = "dhfs.objects.write_log")
boolean writeLogging;
@Inject
JKleppmannTreeManager jKleppmannTreeManager;
@Inject
JMapHelper jMapHelper;
private JKleppmannTreeManager.JKleppmannTree getTree() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
}
/**
* Create a new chunk with the given data and a new unique ID.
*
* @param bytes the data to store in the chunk
* @return the created chunk
*/
private ChunkData createChunk(ByteString bytes) {
var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
remoteTx.putDataNew(newChunk);
remoteTx.putData(newChunk);
return newChunk;
}
void init(@Observes @Priority(500) StartupEvent event) {
Log.info("Initializing file service");
getTree();
getTreeW();
}
private JKleppmannTreeNode getDirEntry(String name) {
var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
private JKleppmannTreeNode getDirEntryW(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private JKleppmannTreeNode getDirEntryR(String name) {
var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) return Optional.empty();
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
var ret = curTx.get(JKleppmannTreeNode.class, res);
return ret;
}
/**
* Get the attributes of a file or directory.
* @param uuid the UUID of the file or directory
* @return the attributes of the file or directory
*/
@Override
public Optional<GetattrRes> getattr(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var ref = curTx.get(JData.class, uuid).orElse(null);
@@ -122,7 +125,7 @@ public class DhfsFileService {
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Unexpected node type for getattr: " + ref.key()));
}
} else if (ref instanceof JKleppmannTreeNodeHolder) {
} else if (ref instanceof JKleppmannTreeNode) {
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Unexpected node type for getattr: " + ref.key()));
@@ -131,17 +134,13 @@ public class DhfsFileService {
});
}
/**
* Try to resolve a path to a file or directory.
* @param name the path to resolve
* @return the key of the file or directory, or an empty optional if it does not exist
*/
@Override
public Optional<JObjectKey> open(String name) {
return jObjectTxManager.executeTx(() -> {
try {
var ret = getDirEntry(name);
var ret = getDirEntryR(name);
return switch (ret.meta()) {
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno());
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
default -> Optional.empty();
};
@@ -159,16 +158,11 @@ public class DhfsFileService {
throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
}
/**
* Create a new file with the given name and mode.
* @param name the name of the file
* @param mode the mode of the file
* @return the key of the created file
*/
@Override
public Optional<JObjectKey> create(String name, long mode) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntry(path.getParent().toString());
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
@@ -180,7 +174,7 @@ public class DhfsFileService {
remoteTx.putData(f);
try {
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
} catch (Exception e) {
// fobj.getMeta().removeRef(newNodeId);
throw e;
@@ -189,96 +183,71 @@ public class DhfsFileService {
});
}
/**
* Get the parent directory of a file or directory.
* @param ino the key of the file or directory
* @return the parent directory
*/
//FIXME: Slow..
@Override
public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
return jObjectTxManager.executeTx(() -> {
// FIXME: Slow
return getTree().findParent(w -> {
return getTreeW().findParent(w -> {
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
return f.fileIno().equals(ino);
return f.getFileIno().equals(ino);
return false;
});
});
}
/**
* Create a new directory with the given name and mode.
* @param name the name of the directory
* @param mode the mode of the directory
*/
@Override
public void mkdir(String name, long mode) {
jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntry(path.getParent().toString());
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String dname = path.getFileName().toString();
Log.debug("Creating directory " + name);
// TODO: No modes for directories yet
getTree().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId());
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
});
}
/**
* Unlink a file or directory.
* @param name the name of the file or directory
* @throws DirectoryNotEmptyException if the directory is not empty and recursive delete is not allowed
*/
@Override
public void unlink(String name) {
jObjectTxManager.executeTx(() -> {
var node = getDirEntryOpt(name).orElse(null);
if (node == null)
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
if (!allowRecursiveDelete && !node.children().isEmpty())
throw new DirectoryNotEmptyException();
}
getTree().trash(node.meta(), node.key());
getTreeW().trash(node.meta(), node.key());
});
}
/**
* Rename a file or directory.
* @param from the old name
* @param to the new name
* @return true if the rename was successful, false otherwise
*/
@Override
public Boolean rename(String from, String to) {
return jObjectTxManager.executeTx(() -> {
var node = getDirEntry(from);
var node = getDirEntryW(from);
JKleppmannTreeNodeMeta meta = node.meta();
var toPath = Path.of(to);
var toDentry = getDirEntry(toPath.getParent().toString());
var toDentry = getDirEntryW(toPath.getParent().toString());
ensureDir(toDentry);
getTree().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
return true;
});
}
/**
* Change the mode of a file or directory.
* @param uuid the ID of the file or directory
* @param mode the new mode
* @return true if the mode was changed successfully, false otherwise
*/
@Override
public Boolean chmod(JObjectKey uuid, long mode) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
if (dent instanceof JKleppmannTreeNodeHolder) {
if (dent instanceof JKleppmannTreeNode) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withMode(mode).withCurrentMTime());
remoteTx.putData(f.withMode(mode).withMTime(System.currentTimeMillis()));
return true;
} else {
throw new IllegalArgumentException(uuid + " is not a file");
@@ -289,14 +258,10 @@ public class DhfsFileService {
});
}
/**
* Read the contents of a directory.
* @param name the path of the directory
* @return an iterable of the names of the files in the directory
*/
@Override
public Iterable<String> readDir(String name) {
return jObjectTxManager.executeTx(() -> {
var found = getDirEntry(name);
var found = getDirEntryW(name);
if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
@@ -305,14 +270,8 @@ public class DhfsFileService {
});
}
/**
* Read the contents of a file.
* @param fileUuid the ID of the file
* @param offset the offset to start reading from
* @param length the number of bytes to read
* @return the contents of the file as a ByteString
*/
public ByteString read(JObjectKey fileUuid, long offset, int length) {
@Override
public Optional<ByteString> read(JObjectKey fileUuid, long offset, int length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should not be negative: " + length));
@@ -322,12 +281,12 @@ public class DhfsFileService {
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
return Optional.empty();
}
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
if (!it.hasNext())
return ByteString.empty();
return Optional.of(ByteString.empty());
// if (it.peekNextKey().key() != offset) {
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
@@ -365,19 +324,14 @@ public class DhfsFileService {
chunk = it.next();
}
return buf;
return Optional.of(buf);
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
return Optional.empty();
}
});
}
/**
* Read the raw contents of a chunk.
* @param uuid the ID of the chunk
* @return the chunk data
*/
private ByteString readChunk(JObjectKey uuid) {
var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);
@@ -389,11 +343,6 @@ public class DhfsFileService {
return chunkRead.data();
}
/**
* Get the size of a chunk.
* @param uuid the ID of the chunk
* @return the size of the chunk
*/
private int getChunkSize(JObjectKey uuid) {
return readChunk(uuid).size();
}
@@ -402,24 +351,24 @@ public class DhfsFileService {
return num & -(1L << n);
}
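// alignDown rounds num down to a multiple of 2^n by clearing the low n bits:
// -(1L << n) in two's complement is a mask of all ones above bit n (for n = 3 it is ~7).
// Examples: alignDown(13, 3) = 13 & ~7 = 8; alignDown(16, 3) = 16; alignDown(7, 3) = 0.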
/**
* Write data to a file.
* @param fileUuid the ID of the file
* @param offset the offset to write to
* @param data the data to write
* @return the number of bytes written
*/
@Override
public Long write(JObjectKey fileUuid, long offset, ByteString data) {
return jObjectTxManager.executeTx(() -> {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should not be negative: " + offset));
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
if (file == null) {
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
Log.error("File not found when trying to write: " + fileUuid);
return -1L;
}
Map<Long, JObjectKey> removedChunks = new HashMap<>();
if (writeLogging) {
Log.info("Writing to file: " + file.key() + " size=" + size(fileUuid) + " "
+ offset + " " + data.size());
}
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
long writeEnd = offset + data.size();
@@ -457,7 +406,7 @@ public class DhfsFileService {
}
Map<Long, JObjectKey> newChunks = new HashMap<>();
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
if (existingEnd < offset) {
if (!pendingPrefix.isEmpty()) {
@@ -474,13 +423,12 @@ public class DhfsFileService {
int combinedSize = pendingWrites.size();
{
int targetChunkSize = 1 << targetChunkAlignment;
int cur = 0;
while (cur < combinedSize) {
int end;
if (combinedSize - cur < maxChunkSize)
end = combinedSize;
else if (targetChunkAlignment < 0)
if (targetChunkAlignment < 0)
end = combinedSize;
else
end = Math.min(cur + targetChunkSize, combinedSize);
@@ -496,27 +444,22 @@ public class DhfsFileService {
}
for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file.withCurrentMTime());
remoteTx.putData(file);
return (long) data.size();
});
}
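// The write path above cuts the pending bytes into chunks of at most
// 2^targetChunkAlignment bytes (a single chunk when alignment is disabled).
// Roughly, with alignment enabled:
//     int targetChunkSize = 1 << targetChunkAlignment;
//     for (int cur = 0; cur < combinedSize; ) {
//         int end = Math.min(cur + targetChunkSize, combinedSize);
//         newChunks.put(realOffset + cur, createChunk(pendingWrites.substring(cur, end)).key());
//         cur = end;
//     }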
/**
* Truncate a file to the given length.
* @param fileUuid the ID of the file
* @param length the new length of the file
* @return true if the truncate was successful, false otherwise
*/
@Override
public Boolean truncate(JObjectKey fileUuid, long length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
@@ -592,27 +535,21 @@ public class DhfsFileService {
// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());
for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file.withCurrentMTime());
remoteTx.putData(file);
return true;
});
}
/**
* Fill the given range with zeroes.
* @param fillStart the start of the range
* @param length the end offset of the range (an absolute offset, not a byte count)
* @param newChunks the map to store the new chunks in
*/
private void fillZeros(long fillStart, long length, Map<Long, JObjectKey> newChunks) {
private void fillZeros(long fillStart, long length, NavigableMap<Long, JObjectKey> newChunks) {
long combinedSize = (length - fillStart);
long start = fillStart;
@@ -647,39 +584,26 @@ public class DhfsFileService {
}
}
/**
* Read the contents of a symlink.
* @param uuid the ID of the symlink
* @return the contents of the symlink as a string
*/
@Override
public String readlink(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
return readlinkBS(uuid).toStringUtf8();
});
}
/**
* Read the contents of a symlink as a ByteString.
* @param uuid the ID of the symlink
* @return the contents of the symlink as a ByteString
*/
@Override
public ByteString readlinkBS(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid))).get();
});
}
/**
* Create a symlink.
* @param oldpath the target of the symlink
* @param newpath the path of the symlink
* @return the key of the created symlink
*/
@Override
public JObjectKey symlink(String oldpath, String newpath) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(newpath);
var parent = getDirEntry(path.getParent().toString());
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
@@ -693,24 +617,18 @@ public class DhfsFileService {
jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());
remoteTx.putData(f);
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
return f.key();
});
}
/**
* Set the access and modification times of a file.
* @param fileUuid the ID of the file
* @param atimeMs the access time in milliseconds
* @param mtimeMs the modification time in milliseconds
* @return true if the times were set successfully, false otherwise
*/
@Override
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
// FIXME:
if (dent instanceof JKleppmannTreeNodeHolder) {
if (dent instanceof JKleppmannTreeNode) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
@@ -726,11 +644,7 @@ public class DhfsFileService {
});
}
/**
* Get the size of a file.
* @param fileUuid the ID of the file
* @return the size of the file
*/
@Override
public long size(JObjectKey fileUuid) {
return jObjectTxManager.executeTx(() -> {
long realSize = 0;
@@ -749,15 +663,4 @@ public class DhfsFileService {
return realSize;
});
}
/**
* Write data to a file.
* @param fileUuid the ID of the file
* @param offset the offset to write to
* @param data the data to write
* @return the number of bytes written
*/
public Long write(JObjectKey fileUuid, long offset, byte[] data) {
return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
}
}

View File

@@ -0,0 +1,8 @@
package com.usatiuk.dhfs.files.service;
public class DirectoryNotEmptyException extends RuntimeException {
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
}
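
Returning this from fillInStackTrace() makes the exception stackless: throwing it on a hot path (every unlink of a non-empty directory) skips the cost of capturing a stack trace. The JDK offers the same effect through a protected Throwable constructor; a generalized sketch (the class name is illustrative):

public class StacklessException extends RuntimeException {
    public StacklessException(String message) {
        // no cause, suppression disabled, stack trace not writable
        super(message, null, false, false);
    }
}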

View File

@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.files.service;
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs.service;
package com.usatiuk.dhfs.files.service;
public enum GetattrType {
FILE,

View File

@@ -1,18 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
/**
* ChunkData is a data structure that represents an immutable binary blob
* @param key unique key
* @param data binary data
*/
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
@Override
public int estimateSize() {
return data.size();
}
}

View File

@@ -1,20 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
/**
* FileDto is a data transfer object that contains a file and its chunks.
* @param file the file
* @param chunks the list of chunks, each represented as a pair of a long and a JObjectKey
*/
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
@Override
public Class<? extends JDataRemote> objClass() {
return File.class;
}
}

View File

@@ -1,22 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
/**
* JKleppmannTreeNodeMetaDirectory is a record that represents a directory in the JKleppmann tree.
* @param name the name of the directory
*/
public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaDirectory(name);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of();
}
}

View File

@@ -1,24 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
/**
* JKleppmannTreeNodeMetaFile is a record that represents a file in the JKleppmann tree.
* @param name the name of the file
* @param fileIno a reference to the `File` object
*/
public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
@Override
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaFile(name, fileIno);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of(fileIno);
}
}

View File

@@ -1,13 +0,0 @@
package com.usatiuk.dhfsfs.service;
/**
* DirectoryNotEmptyException is thrown when a directory is not empty.
* This exception is used to indicate that a directory cannot be deleted
* because it contains files or subdirectories.
*/
public class DirectoryNotEmptyException extends RuntimeException {
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
}

View File

@@ -1,11 +0,0 @@
package com.usatiuk.dhfsfs.service;
/**
* GetattrRes is a record that represents the result of a getattr operation.
* @param mtime File modification time
* @param ctime File creation time
* @param mode File mode
* @param type File type
*/
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs;
import io.quarkus.test.junit.QuarkusTestProfile;

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
@@ -30,7 +30,7 @@ public class TestDataCleaner {
purgeDirectory(Path.of(tempDirectory).toFile());
}
public void purgeDirectory(File dir) {
void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs.benchmarks;
package com.usatiuk.dhfs.benchmarks;
import io.quarkus.logging.Log;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

View File

@@ -1,8 +1,8 @@
package com.usatiuk.dhfsfs.benchmarks;
package com.usatiuk.dhfs.benchmarks;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfsfs.TempDataProfile;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.objects.JObjectKey;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs.files;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -1,8 +1,9 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs.files;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
@@ -89,7 +90,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
// for (int start = 0; start < all.length(); start++) {
// for (int end = start; end <= all.length(); end++) {
// var read = fileService.read(fuuid.toString(), start, end - start);
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.toByteArray());
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray());
// }
// }
// }
@@ -110,21 +111,17 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
var curMtime = fileService.getattr(uuid).get().mtime();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).get().toByteArray());
fileService.write(uuid, 4, new byte[]{10, 11, 12});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.write(uuid, 10, new byte[]{13, 14});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
fileService.write(uuid, 6, new byte[]{15, 16});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
fileService.write(uuid, 3, new byte[]{17, 18});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
var newMtime = fileService.getattr(uuid).get().mtime();
Assertions.assertTrue(newMtime > curMtime);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
fileService.unlink("/writeTest");
Assertions.assertFalse(fileService.open("/writeTest").isPresent());
@@ -138,7 +135,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.unlink("/removeTest");
Assertions.assertFalse(fileService.open("/removeTest").isPresent());
@@ -152,12 +149,12 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
}
@RepeatedTest(100)
@@ -169,12 +166,12 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray());
} finally {
fileService.unlink("/truncateTest2");
}
@@ -188,10 +185,10 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.truncate(uuid, 7);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).get().toByteArray());
}
@Test
@@ -201,14 +198,14 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertTrue(fileService.rename("/moveTest", "/movedTest"));
Assertions.assertFalse(fileService.open("/moveTest").isPresent());
Assertions.assertTrue(fileService.open("/movedTest").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest").get(), 0, 10).toByteArray());
fileService.read(fileService.open("/movedTest").get(), 0, 10).get().toByteArray());
}
@Test
@@ -221,9 +218,9 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid2 = ret2.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29});
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray());
jObjectTxManager.run(() -> {
@@ -237,7 +234,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
Assertions.assertTrue(fileService.open("/moveOverTest2").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).toByteArray());
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray());
// await().atMost(5, TimeUnit.SECONDS).until(() -> {
// jObjectTxManager.run(() -> {
@@ -255,8 +252,8 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).get().toByteArray());
}
@Test
@@ -266,13 +263,13 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.write(uuid, 20, new byte[]{10, 11, 12, 13, 14, 15, 16, 17, 18, 19});
Assertions.assertArrayEquals(new byte[]{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19
}, fileService.read(uuid, 0, 30).toByteArray());
}, fileService.read(uuid, 0, 30).get().toByteArray());
}
@Test
@@ -282,7 +279,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
// var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new);
// var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0);
@@ -297,6 +294,6 @@ public abstract class DhfsFileServiceSimpleTestImpl {
Assertions.assertTrue(fileService.open("/movedTest2").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest2").get(), 0, 10).toByteArray());
fileService.read(fileService.open("/movedTest2").get(), 0, 10).get().toByteArray());
}
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs.files;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs.files;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -73,9 +73,24 @@
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.serceman</groupId>
<groupId>com.github.SerCeMan</groupId>
<artifactId>jnr-fuse</artifactId>
<version>0.5.8</version>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
@@ -107,11 +122,26 @@
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>kleppmanntree</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>objects</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
@@ -139,13 +169,16 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>0.5C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
true
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
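
This hunk swaps failsafe's forked-JVM parallelism (forkCount 0.5C with no fork reuse) for in-process JUnit 5 parallelism: test classes run concurrently inside one JVM, and with JUnit's dynamic strategy the pool size is factor * available processors, so a dynamic.factor of 0.5 on an 8-core runner yields 0.5 * 8 = 4 concurrently executing test classes.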

View File

@@ -1,15 +1,12 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs.fuse;
import com.google.protobuf.UnsafeByteOperations;
import com.kenai.jffi.MemoryIO;
import com.sun.security.auth.module.UnixSystem;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.dhfsfs.service.DirectoryNotEmptyException;
import com.usatiuk.dhfsfs.service.GetattrRes;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException;
import com.usatiuk.dhfs.files.service.GetattrRes;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.utils.UninitializedByteBuffer;
import com.usatiuk.utils.UnsafeAccessor;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -20,15 +17,15 @@ import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import jnr.ffi.Pointer;
import jnr.ffi.Runtime;
import jnr.ffi.Struct;
import jnr.ffi.types.off_t;
import org.apache.commons.lang3.SystemUtils;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import ru.serce.jnrfuse.ErrorCodes;
import ru.serce.jnrfuse.FuseFillDir;
import ru.serce.jnrfuse.FuseStubFS;
import ru.serce.jnrfuse.struct.*;
import ru.serce.jnrfuse.struct.FileStat;
import ru.serce.jnrfuse.struct.FuseFileInfo;
import ru.serce.jnrfuse.struct.Statvfs;
import ru.serce.jnrfuse.struct.Timespec;
import java.nio.ByteBuffer;
import java.nio.file.Paths;
@@ -43,8 +40,6 @@ import static jnr.posix.FileStat.*;
public class DhfsFuse extends FuseStubFS {
private static final int blksize = 1048576;
private static final int iosize = 1048576;
private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
private final AtomicLong _fh = new AtomicLong(1);
@ConfigProperty(name = "dhfs.fuse.root")
String root;
@ConfigProperty(name = "dhfs.fuse.enabled")
@@ -54,8 +49,13 @@ public class DhfsFuse extends FuseStubFS {
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@Inject
JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors;
@Inject
DhfsFileService fileService;
private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
private final AtomicLong _fh = new AtomicLong(1);
private long allocateHandle(JObjectKey key) {
while (true) {
var newFh = _fh.getAndIncrement();
@@ -73,48 +73,40 @@ public class DhfsFuse extends FuseStubFS {
void init(@Observes @Priority(100000) StartupEvent event) {
if (!enabled) return;
Paths.get(root).toFile().mkdirs();
if (!Paths.get(root).toFile().isDirectory())
throw new IllegalStateException("Could not create directory " + root);
Log.info("Mounting with root " + root);
var uid = new UnixSystem().getUid();
var gid = new UnixSystem().getGid();
var opts = new ArrayList<String>();
if (SystemUtils.IS_OS_WINDOWS) {
// Assuming macFuse
if (SystemUtils.IS_OS_MAC) {
opts.add("-o");
opts.add("auto_cache");
opts.add("-o");
opts.add("uid=-1");
opts.add("-o");
opts.add("gid=-1");
} else {
Paths.get(root).toFile().mkdirs();
if (!Paths.get(root).toFile().isDirectory())
throw new IllegalStateException("Could not create directory " + root);
var uid = new UnixSystem().getUid();
var gid = new UnixSystem().getGid();
// Assuming macFuse
if (SystemUtils.IS_OS_MAC) {
opts.add("-o");
opts.add("iosize=" + iosize);
} else if (SystemUtils.IS_OS_LINUX) {
// FIXME: There's something else missing: the writes still seem to be 32k max
opts.add("iosize=" + iosize);
} else if (SystemUtils.IS_OS_LINUX) {
// FIXME: There's something else missing: the writes still seem to be 32k max
// opts.add("-o");
// opts.add("large_read");
opts.add("-o");
opts.add("big_writes");
opts.add("-o");
opts.add("max_read=" + iosize);
opts.add("-o");
opts.add("max_write=" + iosize);
}
opts.add("-o");
opts.add("auto_cache");
opts.add("big_writes");
opts.add("-o");
opts.add("uid=" + uid);
opts.add("max_read=" + iosize);
opts.add("-o");
opts.add("gid=" + gid);
opts.add("max_write=" + iosize);
}
opts.add("-o");
opts.add("auto_cache");
opts.add("-o");
opts.add("uid=" + uid);
opts.add("-o");
opts.add("gid=" + gid);
mount(Paths.get(root), false, debug, opts.toArray(String[]::new));
}
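// Mount options in the usual libfuse/macFUSE sense: auto_cache keeps the kernel page
// cache valid across opens based on mtime; big_writes (Linux) lifts the legacy 4 KiB
// write limit; max_read/max_write cap request sizes at iosize; iosize plays the same
// role on macFUSE; uid=/gid= present the mount as owned by the current user.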
@@ -232,8 +224,8 @@ public class DhfsFuse extends FuseStubFS {
var fileKey = getFromHandle(fi.fh.get());
var read = fileService.read(fileKey, offset, (int) size);
if (read.isEmpty()) return 0;
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
return read.size();
UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
return read.get().size();
} catch (Throwable e) {
Log.error("When reading " + path, e);
return -ErrorCodes.EIO();
@@ -242,22 +234,24 @@ public class DhfsFuse extends FuseStubFS {
@Override
public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
var buffer = UninitializedByteBuffer.allocate((int) size);
UnsafeAccessor.UNSAFE.copyMemory(
buf.address(),
UnsafeAccessor.NIO.getBufferAddress(buffer),
size
);
return write(path, buffer, offset, fi);
}
public int write(String path, ByteBuffer buffer, long offset, FuseFileInfo fi) {
if (offset < 0) return -ErrorCodes.EINVAL();
try {
var fileKey = getFromHandle(fi.fh.get());
var buffer = ByteBuffer.allocateDirect((int) size);
if (buffer.isDirect()) {
jnrPtrByteOutputAccessors.getUnsafe().copyMemory(
buf.address(),
jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer),
size
);
} else {
buf.get(0, buffer.array(), 0, (int) size);
}
var written = fileService.write(fileKey, offset, UnsafeByteOperations.unsafeWrap(buffer));
return written.intValue();
} catch (Exception e) {
} catch (Throwable e) {
Log.error("When writing " + path, e);
return -ErrorCodes.EIO();
}
@@ -393,7 +387,7 @@ public class DhfsFuse extends FuseStubFS {
var file = fileOpt.get();
var read = fileService.readlinkBS(fileOpt.get());
if (read.isEmpty()) return 0;
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
buf.putByte(Math.min(size - 1, read.size()), (byte) 0);
return 0;
} catch (Throwable e) {
@@ -425,29 +419,4 @@ public class DhfsFuse extends FuseStubFS {
return -ErrorCodes.EIO();
}
}
@Override
public int write_buf(String path, FuseBufvec buf, @off_t long off, FuseFileInfo fi) {
int size = (int) libFuse.fuse_buf_size(buf);
FuseBufvec tmpVec = new FuseBufvec(Runtime.getSystemRuntime());
long tmpVecAddr = MemoryIO.getInstance().allocateMemory(Struct.size(tmpVec), false);
try {
tmpVec.useMemory(Pointer.wrap(Runtime.getSystemRuntime(), tmpVecAddr));
FuseBufvec.init(tmpVec, size);
var bb = UninitializedByteBuffer.allocate(size);
var mem = UninitializedByteBuffer.getAddress(bb);
tmpVec.buf.mem.set(mem);
tmpVec.buf.size.set(size);
int res = (int) libFuse.fuse_buf_copy(tmpVec, buf, 0);
if (res != size) {
Log.errorv("fuse_buf_copy failed: {0} != {1}", res, size);
return -ErrorCodes.ENOMEM();
}
return write(path, bb, off, fi);
} finally {
if (tmpVecAddr != 0) {
MemoryIO.getInstance().freeMemory(tmpVecAddr);
}
}
}
}
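
For context on the write paths above: both write() and write_buf() funnel native FUSE memory into a Java direct ByteBuffer before handing it to the file service. Below is a minimal self-contained sketch of that copy using only JDK reflection and sun.misc.Unsafe; the project's UnsafeAccessor/UninitializedByteBuffer helpers are not shown in this diff, so the accessor boilerplate here is an assumption, not the actual implementation.

import sun.misc.Unsafe;
import java.lang.reflect.Field;
import java.nio.Buffer;
import java.nio.ByteBuffer;

class NativeCopySketch {
    static final Unsafe UNSAFE;
    static final Field ADDRESS;

    static {
        try {
            // theUnsafe and Buffer.address are JDK internals; on modern JDKs this
            // needs --add-opens java.base/java.nio=ALL-UNNAMED (an assumption about
            // your launch flags, not something this diff configures).
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            UNSAFE = (Unsafe) f.get(null);
            ADDRESS = Buffer.class.getDeclaredField("address");
            ADDRESS.setAccessible(true);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    // Copies `size` bytes starting at native address `src` into a fresh direct buffer,
    // mirroring what write() does with the FUSE-provided pointer.
    static ByteBuffer copyIn(long src, int size) throws IllegalAccessException {
        ByteBuffer dst = ByteBuffer.allocateDirect(size);
        long dstAddr = ADDRESS.getLong(dst);
        UNSAFE.copyMemory(src, dstAddr, size);
        return dst;
    }
}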

View File

@@ -1,7 +1,6 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs.fuse;
import com.google.protobuf.ByteOutput;
import com.usatiuk.utils.UnsafeAccessor;
import jnr.ffi.Pointer;
import java.nio.ByteBuffer;
@@ -10,12 +9,14 @@ import java.nio.MappedByteBuffer;
public class JnrPtrByteOutput extends ByteOutput {
private final Pointer _backing;
private final long _size;
private final JnrPtrByteOutputAccessors _accessors;
private long _pos;
public JnrPtrByteOutput(Pointer backing, long size) {
public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) {
_backing = backing;
_size = size;
_pos = 0;
_accessors = accessors;
}
@Override
@@ -46,9 +47,9 @@ public class JnrPtrByteOutput extends ByteOutput {
if (value instanceof MappedByteBuffer mb) {
mb.load();
}
long addr = UnsafeAccessor.NIO.getBufferAddress(value) + value.position();
long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position();
var out = _backing.address() + _pos;
UnsafeAccessor.UNSAFE.copyMemory(addr, out, rem);
_accessors.getUnsafe().copyMemory(addr, out, rem);
} else {
_backing.put(_pos, value.array(), value.arrayOffset() + value.position(), rem);
}

View File

@@ -0,0 +1,29 @@
package com.usatiuk.dhfs.fuse;
import jakarta.inject.Singleton;
import jdk.internal.access.JavaNioAccess;
import jdk.internal.access.SharedSecrets;
import sun.misc.Unsafe;
import java.lang.reflect.Field;
@Singleton
class JnrPtrByteOutputAccessors {
JavaNioAccess _nioAccess;
Unsafe _unsafe;
JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException {
_nioAccess = SharedSecrets.getJavaNioAccess();
Field f = Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
_unsafe = (Unsafe) f.get(null);
}
public JavaNioAccess getNioAccess() {
return _nioAccess;
}
public Unsafe getUnsafe() {
return _unsafe;
}
}
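
One caveat with the accessor above: jdk.internal.access is not exported from java.base by default, so (assuming a stock JDK setup, which this diff does not show) both compilation and runtime need a flag along the lines of:

--add-exports java.base/jdk.internal.access=ALL-UNNAMED

sun.misc.Unsafe itself is still reachable through the jdk.unsupported module, so only the reflective theUnsafe lookup is needed for that half.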

View File

@@ -14,9 +14,8 @@ dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=524288
dhfs.files.max_chunk_size=524288
dhfs.files.target_chunk_alignment=17
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
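
On the chunk numbers above, assuming target_chunk_alignment is a power-of-two exponent (the diff itself does not say): the new pair targets 2097152-byte chunks (2^21, i.e. 2 MiB) aligned on 2^19 = 524288-byte (512 KiB) boundaries, while the old pair was 524288-byte (2^19) chunks with 2^17 = 131072-byte (128 KiB) alignment. In both cases the alignment unit works out to a quarter of the target chunk size.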

View File

@@ -0,0 +1,29 @@
package com.usatiuk.dhfs;
import io.quarkus.test.junit.QuarkusTestProfile;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
Path tempDirWithPrefix;
try {
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
} catch (IOException e) {
throw new RuntimeException(e);
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;
}
}
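
A usage sketch for the profile above (the subclass and the overridden property value are hypothetical, made up for illustration; @QuarkusTest and @TestProfile are the standard Quarkus test annotations):

import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import java.util.Map;

// Hypothetical concrete profile: temp dirs from TempDataProfile plus one extra override.
class NoRefVerificationProfile extends TempDataProfile {
    @Override
    protected void getConfigOverrides(Map<String, String> toPut) {
        toPut.put("dhfs.objects.ref_verification", "false");
    }
}

@QuarkusTest
@TestProfile(NoRefVerificationProfile.class)
class SomeDhfsTest {
    // test methods here run against the per-test temp directories
}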

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
@@ -30,7 +30,7 @@ public class TestDataCleaner {
purgeDirectory(Path.of(tempDirectory).toFile());
}
public static void purgeDirectory(File dir) {
void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);

View File

@@ -1,5 +1,6 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs.fuse;
import com.usatiuk.dhfs.TempDataProfile;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import org.eclipse.microprofile.config.inject.ConfigProperty;

View File

@@ -1,215 +0,0 @@
package com.usatiuk.dhfsfuse.integration;
import io.quarkus.logging.Log;
import java.io.*;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
public class LazyFs {
private static final String lazyFsPath;
static {
lazyFsPath = System.getProperty("lazyFsPath");
System.out.println("LazyFs Path: " + lazyFsPath);
}
private final String mountRoot;
private final String dataRoot;
private final String name;
private final File configFile;
private final File fifoFile;
private Thread errPiper;
private Thread outPiper;
private CountDownLatch startLatch;
private Process fs;
public LazyFs(String name, String mountRoot, String dataRoot) {
this.name = name;
this.mountRoot = mountRoot;
this.dataRoot = dataRoot;
try {
configFile = File.createTempFile("lazyfs", ".conf");
configFile.deleteOnExit();
fifoFile = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
fifoFile.deleteOnExit();
} catch (IOException e) {
throw new RuntimeException(e);
}
Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
}
private String fifoPath() {
return fifoFile.getAbsolutePath();
}
public void start(String extraOpts) {
var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
if (!lfsPath.toFile().isFile())
throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
if (!lfsPath.toFile().canExecute())
throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());
try (var rwFile = new RandomAccessFile(configFile, "rw");
var channel = rwFile.getChannel()) {
channel.truncate(0);
var config = "[faults]\n" +
"fifo_path=\"" + fifoPath() + "\"\n" +
"[cache]\n" +
"apply_eviction=false\n" +
"[cache.simple]\n" +
"custom_size=\"1gb\"\n" +
"blocks_per_page=1\n" +
"[filesystem]\n" +
"log_all_operations=false\n" +
"logfile=\"\"\n" + extraOpts;
rwFile.write(config.getBytes());
Log.info("LazyFs config: \n" + config);
} catch (Exception e) {
throw new RuntimeException(e);
}
var argList = new ArrayList<String>();
argList.add(lfsPath.toString());
argList.add(Path.of(mountRoot).toString());
argList.add("--config-path");
argList.add(configFile.getAbsolutePath());
argList.add("-o");
argList.add("allow_other");
argList.add("-o");
argList.add("modules=subdir");
argList.add("-o");
argList.add("subdir=" + Path.of(dataRoot).toAbsolutePath().toString());
try {
Log.info("Starting LazyFs " + argList);
fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
} catch (Exception e) {
throw new RuntimeException(e);
}
startLatch = new CountDownLatch(1);
outPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
String line;
while ((line = input.readLine()) != null) {
if (line.contains("running LazyFS"))
startLatch.countDown();
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs out piper finished");
});
outPiper.start();
errPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
String line;
while ((line = input.readLine()) != null) {
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs err piper finished");
});
errPiper.start();
try {
if (!startLatch.await(30, TimeUnit.SECONDS))
throw new RuntimeException("StartLatch timed out");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
Log.info("LazyFs started");
}
public void start() {
start("");
}
private String mdbPath() {
return Path.of(dataRoot).resolve("objects").resolve("data.mdb").toAbsolutePath().toString();
}
public void startTornOp() {
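// Note: despite the method name, the config below injects a "torn-seq" fault
// (and startTornSeq below injects "torn-op"); the names appear swapped.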
start("\n" +
"[[injection]]\n" +
"type=\"torn-seq\"\n" +
"op=\"write\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"persist=[1,4]\n" +
"occurrence=3");
}
public void startTornSeq() {
start("[[injection]]\n" +
"type=\"torn-op\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"occurrence=3\n" +
"parts=3 #or parts_bytes=[4096,3600,1260]\n" +
"persist=[1,3]");
}
public void crash() {
try {
var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
Log.info("Running command: " + cmd);
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd}).waitFor();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void stop() {
try {
synchronized (this) {
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + mountRoot}).waitFor();
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Doesn't actually work?
//
// public void crashop() {
// try {
// var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
//
// public void crashseq() {
// try {
// var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
}
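
A sketch of driving the wrapper above end to end (mount and data paths are illustrative):

LazyFs lazyFs = new LazyFs("demo", "/tmp/lazyfs-mnt", "/tmp/lazyfs-data");
lazyFs.startTornOp();   // mount with a torn-write fault armed on objects/data.mdb
// ... run a workload against /tmp/lazyfs-mnt ...
lazyFs.crash();         // tell LazyFs over the fifo to crash after the next write
lazyFs.stop();          // fusermount3 -u /tmp/lazyfs-mnt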

View File

@@ -1,489 +0,0 @@
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
public class LazyFsIT {
GenericContainer<?> container1;
GenericContainer<?> container2;
WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;
String c1uuid;
String c2uuid;
File data1;
File data2;
File data1Lazy;
File data2Lazy;
LazyFs lazyFs1;
LazyFs lazyFs2;
ExecutorService executor;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("dhfsdata").toFile();
data2 = Files.createTempDirectory("dhfsdata").toFile();
data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
data2Lazy = Files.createTempDirectory("lazyfsroot").toFile();
network = Network.newNetwork();
lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
lazyFs1.start();
lazyFs2 = new LazyFs(testInfo.getDisplayName(), data2.toString(), data2Lazy.toString());
lazyFs2.start();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
lazyFs1.stop();
lazyFs2.stop();
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data1Lazy);
TestDataCleaner.purgeDirectory(data2);
TestDataCleaner.purgeDirectory(data2Lazy);
executor.close();
network.close();
}
private void checkConsistency(String testName) {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info("Listing consistency " + testName + "\n"
+ ls1 + "\n"
+ cat1 + "\n"
+ ls2 + "\n"
+ cat2 + "\n");
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Log.info("Killing");
Thread.sleep(3000);
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Thread.sleep(3000);
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
private enum CrashType {
CRASH,
TORN_OP,
TORN_SEQ
}
}

View File

@@ -0,0 +1,32 @@
package com.usatiuk.kleppmanntree;
import java.io.Serializable;
public class AtomicClock implements Clock<Long>, Serializable {
private long _max = 0;
public AtomicClock(long counter) {
_max = counter;
}
@Override
public Long getTimestamp() {
return ++_max;
}
public void setTimestamp(Long timestamp) {
_max = timestamp;
}
@Override
public Long peekTimestamp() {
return _max;
}
@Override
public Long updateTimestamp(Long receivedTimestamp) {
var old = _max;
_max = Math.max(_max, receivedTimestamp) + 1;
return old;
}
}
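
The clock above is a Lamport-style logical clock: getTimestamp() ticks locally, and updateTimestamp() folds in a remote stamp so the counter always advances past everything it has seen. A worked example (values illustrative):

AtomicClock clock = new AtomicClock(5);
clock.getTimestamp();       // 6  (local tick)
clock.updateTimestamp(10L); // returns the old value 6; counter becomes max(6, 10) + 1 = 11
clock.peekTimestamp();      // 11
clock.getTimestamp();       // 12

Despite the name, the class has no synchronization, so callers presumably serialize access to it externally.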

View File

@@ -1,5 +1,7 @@
package com.usatiuk.kleppmanntree;
import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;
import org.apache.commons.lang3.tuple.Pair;
import java.util.*;
@@ -15,6 +17,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
private final PeerInterface<PeerIdT> _peers;
private final Clock<TimestampT> _clock;
private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;
public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
PeerInterface<PeerIdT> peers,
@@ -86,6 +89,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
node.withParent(null)
.withLastEffectiveOp(null)
);
_undoCtx.put(node.key(), node);
}
}
@@ -213,16 +217,31 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
assert cmp != 0;
if (cmp < 0) {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
try {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
_undoCtx = new HashMap<>();
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
}
try {
doAndPut(op, failCreatingIfExists);
} finally {
for (var entry : toUndo) {
redoOp(entry);
}
if (!_undoCtx.isEmpty()) {
for (var e : _undoCtx.entrySet()) {
LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
_storage.removeNode(e.getKey());
}
}
_undoCtx = null;
}
} finally {
tryTrimLog();
}
doAndPut(op, failCreatingIfExists);
for (var entry : toUndo) {
redoOp(entry);
}
tryTrimLog();
} else {
doAndPut(op, failCreatingIfExists);
tryTrimLog();
@@ -245,7 +264,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
} catch (AlreadyExistsException aex) {
throw aex;
} catch (Exception e) {
throw new RuntimeException("Error computing effects for op " + op.toString(), e);
LOGGER.log(Level.SEVERE, "Error computing effects for op" + op.toString(), e);
computed = new LogRecord<>(op, null);
}
if (computed.effects() != null)
@@ -254,6 +274,24 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
if (_undoCtx != null) {
var node = _undoCtx.get(key);
if (node != null) {
try {
if (!node.children().isEmpty()) {
LOGGER.log(Level.WARNING, "Not empty children for undone node " + key);
}
node = node.withParent(parent).withMeta(meta);
} catch (Exception e) {
LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
node = null;
}
}
if (node != null) {
_undoCtx.remove(key);
return node;
}
}
return _storage.createNewNode(key, parent, meta);
}
@@ -334,6 +372,10 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
var conflictNode = _storage.getById(conflictNodeId);
MetaT conflictNodeMeta = conflictNode.meta();
if (Objects.equals(conflictNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}
LOGGER.finer(() -> "Node creation conflict: " + conflictNode);
String newConflictNodeName = op.newName() + ".conflict." + conflictNode.key();
@@ -358,14 +400,18 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
if (oldMeta != null
&& op.newMeta() != null
&& !oldMeta.getClass().equals(op.newMeta().getClass())) {
throw new RuntimeException("Class mismatch for meta for node " + node.key());
LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
return new LogRecord<>(op, null);
}
var replaceNodeId = newParent.children().get(op.newName());
if (replaceNodeId != null) {
var replaceNode = _storage.getById(replaceNodeId);
var replaceNodeMeta = replaceNode.meta();
if (Objects.equals(replaceNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}
LOGGER.finer(() -> "Node replacement: " + replaceNode);
return new LogRecord<>(op, List.of(
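
To make the control flow of the conflict-handling hunks above explicit: a late-arriving op (timestamp older than the newest applied entry) is handled by undoing all newer log entries, applying the op, then replaying the undone entries in order; _undoCtx parks nodes removed during undo so getNewNode() can resurrect them, and anything left unclaimed is dropped from storage. The following is a self-contained sketch of that dance only, with ops reduced to timestamped strings and the _undoCtx node-parking omitted; it is illustrative, not the actual method (List.reversed() assumes Java 21, which the project targets).

import java.util.*;

class UndoRedoSketch {
    record Op(long ts, String name) {}

    final NavigableMap<Long, Op> log = new TreeMap<>();
    final List<String> applied = new ArrayList<>();

    void receive(Op op) {
        if (!log.isEmpty() && op.ts() < log.lastKey()) {
            var newer = new ArrayList<>(log.tailMap(op.ts(), false).values());
            for (var e : newer.reversed()) undo(e);   // roll back, newest first
            try {
                apply(op);                            // apply the late op in place
            } finally {
                for (var e : newer) apply(e);         // replay in original order
            }
        } else {
            apply(op);
        }
        log.put(op.ts(), op);
    }

    void apply(Op op) { applied.add(op.name()); }
    void undo(Op op)  { applied.remove(op.name()); }
}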

View File

@@ -10,14 +10,14 @@ public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT exten
NodeIdT childId) implements Serializable {
public String oldName() {
if (oldInfo.oldMeta() != null) {
return oldInfo.oldMeta().name();
return oldInfo.oldMeta().getName();
}
return childId.toString();
}
public String newName() {
if (newMeta != null) {
return newMeta.name();
return newMeta.getName();
}
return childId.toString();
}

View File

@@ -3,7 +3,7 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
public interface NodeMeta extends Serializable {
String name();
String getName();
NodeMeta withName(String name);
}

View File

@@ -7,7 +7,7 @@ public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends
NodeIdT childId) implements Serializable {
public String newName() {
if (newMeta != null)
return newMeta.name();
return newMeta.getName();
return childId.toString();
}
}

View File

@@ -17,7 +17,7 @@ public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT ext
default String name() {
var meta = meta();
if (meta != null) return meta.name();
if (meta != null) return meta.getName();
return key().toString();
}

View File

@@ -8,7 +8,7 @@ public abstract class TestNodeMeta implements NodeMeta {
}
@Override
public String name() {
public String getName() {
return _name;
}

View File

@@ -1,13 +1,6 @@
package com.usatiuk.objects;
import com.usatiuk.objects.iterators.Data;
public sealed interface JDataVersionedWrapper extends Data<JDataVersionedWrapper> permits JDataVersionedWrapperLazy, JDataVersionedWrapperImpl {
@Override
default JDataVersionedWrapper value() {
return this;
}
public sealed interface JDataVersionedWrapper permits JDataVersionedWrapperLazy, JDataVersionedWrapperImpl {
JData data();
long version();

View File

@@ -3,9 +3,9 @@ package com.usatiuk.objects;
import java.util.function.Supplier;
public final class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
private JData _data;
private final long _version;
private final int _estimatedSize;
private JData _data;
private Supplier<JData> _producer;
public JDataVersionedWrapperLazy(long version, int estimatedSize, Supplier<JData> producer) {

View File

@@ -8,10 +8,11 @@ import jakarta.inject.Singleton;
import java.nio.ByteBuffer;
@Singleton
public class JDataVersionedWrapperSerializer {
public class JDataVersionedWrapperSerializer implements ObjectSerializer<JDataVersionedWrapper> {
@Inject
ObjectSerializer<JData> dataSerializer;
@Override
public ByteString serialize(JDataVersionedWrapper obj) {
ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
buffer.putLong(obj.version());
@@ -19,10 +20,10 @@ public class JDataVersionedWrapperSerializer {
return ByteString.copyFrom(buffer).concat(dataSerializer.serialize(obj.data()));
}
public JDataVersionedWrapper deserialize(ByteBuffer data) {
var version = data.getLong();
return new JDataVersionedWrapperLazy(version, data.remaining(),
() -> dataSerializer.deserialize(data)
);
@Override
public JDataVersionedWrapper deserialize(ByteString data) {
var version = data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
var rawData = data.substring(Long.BYTES);
return new JDataVersionedWrapperLazy(version, rawData.size(), () -> dataSerializer.deserialize(rawData));
}
}
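
The wire format above is simply an 8-byte version header followed by the inner data serializer's payload, and deserialize() defers the payload decode by handing the remaining bytes to a lazy wrapper. A header round-trip in plain JDK types (illustrative, no protobuf):

import java.nio.ByteBuffer;

class VersionHeaderSketch {
    // pack: [version: 8 bytes, big-endian][payload bytes]
    static byte[] pack(long version, byte[] payload) {
        return ByteBuffer.allocate(Long.BYTES + payload.length)
                .putLong(version).put(payload).array();
    }

    // unpack just the header; the payload would be decoded lazily on first access
    static long version(byte[] packed) {
        return ByteBuffer.wrap(packed).getLong();
    }
}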

View File

@@ -1,7 +1,5 @@
package com.usatiuk.objects;
import com.usatiuk.utils.UninitializedByteBuffer;
import java.io.Serial;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
@@ -48,7 +46,7 @@ public final class JObjectKeyImpl implements JObjectKey {
synchronized (this) {
if (_bb != null) return _bb;
var bytes = value.getBytes(StandardCharsets.ISO_8859_1);
var directBb = UninitializedByteBuffer.allocate(bytes.length);
var directBb = ByteBuffer.allocateDirect(bytes.length);
directBb.put(bytes);
directBb.flip();
_bb = directBb;
@@ -71,7 +69,7 @@ public final class JObjectKeyImpl implements JObjectKey {
@Override
public int hashCode() {
return value.hashCode();
return Objects.hash(value);
}
}

View File

@@ -2,13 +2,11 @@ package com.usatiuk.objects;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.utils.SerializationHelper;
import com.usatiuk.dhfs.utils.SerializationHelper;
import io.quarkus.arc.DefaultBean;
import jakarta.enterprise.context.ApplicationScoped;
import java.io.IOException;
import java.nio.ByteBuffer;
@ApplicationScoped
@DefaultBean
@@ -18,8 +16,9 @@ public class JavaDataSerializer implements ObjectSerializer<JData> {
return SerializationHelper.serialize(obj);
}
public JData deserialize(ByteBuffer data) {
try (var is = UnsafeByteOperations.unsafeWrap(data).newInput()) {
@Override
public JData deserialize(ByteString data) {
try (var is = data.newInput()) {
return SerializationHelper.deserialize(is);
} catch (IOException e) {
throw new RuntimeException(e);

View File

@@ -2,10 +2,8 @@ package com.usatiuk.objects;
import com.google.protobuf.ByteString;
import java.nio.ByteBuffer;
public interface ObjectSerializer<T> {
ByteString serialize(T obj);
T deserialize(ByteBuffer data);
T deserialize(ByteString data);
}

View File

@@ -1,10 +1,11 @@
package com.usatiuk.objects.iterators;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import org.apache.commons.lang3.tuple.Pair;
import java.util.Iterator;
public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends Iterator<Pair<K, V>>, AutoCloseable {
public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends Iterator<Pair<K, V>>, AutoCloseableNoThrow {
K peekNextKey();
void skip();
@@ -20,7 +21,4 @@ public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends
default CloseableKvIterator<K, V> reversed() {
return new ReversedKvIterator<K, V>(this);
}
@Override
void close();
}

View File

@@ -1,5 +1,10 @@
package com.usatiuk.objects.iterators;
public interface Data<V> extends MaybeTombstone<V> {
V value();
import java.util.Optional;
public record Data<V>(V value) implements MaybeTombstone<V> {
@Override
public Optional<V> opt() {
return Optional.of(value);
}
}

View File

@@ -1,4 +0,0 @@
package com.usatiuk.objects.iterators;
public record DataWrapper<V>(V value) implements Data<V> {
}

View File

@@ -0,0 +1,12 @@
package com.usatiuk.objects.iterators;
import java.util.stream.Stream;
@FunctionalInterface
public interface IterProdFn<K extends Comparable<K>, V> {
CloseableKvIterator<K, V> get(IteratorStart start, K key);
default Stream<CloseableKvIterator<K, MaybeTombstone<V>>> getFlat(IteratorStart start, K key) {
return Stream.of(new MappingKvIterator<>(get(start, key), Data::new));
}
}

View File

@@ -39,20 +39,20 @@ public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends Reversib
}
// switch (start) {
// case LT -> {
//// assert _next == null || _next.getKey().compareTo(startKey) < 0;
// }
// case LE -> {
//// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
// }
// case GT -> {
// assert _next == null || _next.compareTo(startKey) > 0;
// }
// case GE -> {
// assert _next == null || _next.compareTo(startKey) >= 0;
// }
// }
switch (start) {
case LT -> {
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
}
case LE -> {
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
}
case GT -> {
assert _next == null || _next.compareTo(startKey) > 0;
}
case GE -> {
assert _next == null || _next.compareTo(startKey) >= 0;
}
}
}
private void fillNext() {

View File

@@ -1,4 +1,7 @@
package com.usatiuk.objects.iterators;
import java.util.Optional;
public interface MaybeTombstone<T> {
Optional<T> opt();
}

View File

@@ -1,27 +1,37 @@
package com.usatiuk.objects.iterators;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.mutable.MutableInt;
import org.apache.commons.lang3.mutable.MutableObject;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
import java.util.NavigableMap;
import java.util.NoSuchElementException;
import java.util.TreeMap;
import java.util.*;
public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, MaybeTombstone<V>> {
private record IteratorEntry<K extends Comparable<K>, V>(int priority,
CloseableKvIterator<K, MaybeTombstone<V>> iterator) {
public IteratorEntry<K, V> reversed() {
return new IteratorEntry<>(priority, iterator.reversed());
}
}
public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final NavigableMap<K, IteratorEntry<K, V>> _sortedIterators = new TreeMap<>();
private final String _name;
private final List<IteratorEntry<K, V>> _iterators;
public MergingKvIterator(IteratorStart startType, K startKey, List<CloseableKvIterator<K, V>> iterators) {
public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
_goingForward = true;
_name = name;
// Why are streams so slow?
{
IteratorEntry<K, V>[] iteratorEntries = new IteratorEntry[iterators.size()];
for (int i = 0; i < iterators.size(); i++) {
iteratorEntries[i] = new IteratorEntry<>(i, iterators.get(i));
}
_iterators = List.of(iteratorEntries);
var iteratorsTmp = iterators.stream().flatMap(i -> i.getFlat(startType, startKey));
MutableInt i = new MutableInt(0);
ArrayList<IteratorEntry<K, V>> tmp = new ArrayList<>(16);
iteratorsTmp.forEach(i2 -> {
tmp.add(new IteratorEntry<>(i.getAndIncrement(), i2));
});
_iterators = List.copyOf(tmp);
}
if (startType == IteratorStart.LT || startType == IteratorStart.LE) {
@@ -71,32 +81,32 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
advanceIterator(iterator);
}
// Log.tracev("{0} Initialized: {1}", _name, _sortedIterators);
// switch (startType) {
//// case LT -> {
//// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
//// }
//// case LE -> {
//// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
//// }
// case GT -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) > 0;
Log.tracev("{0} Initialized: {1}", _name, _sortedIterators);
switch (startType) {
// case LT -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
// }
// case GE -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) >= 0;
// case LE -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
// }
// }
case GT -> {
assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) > 0;
}
case GE -> {
assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) >= 0;
}
}
}
@SafeVarargs
public MergingKvIterator(IteratorStart startType, K startKey, CloseableKvIterator<K, V>... iterators) {
this(startType, startKey, List.of(iterators));
public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
this(name, startType, startKey, List.of(iterators));
}
private void advanceIterator(IteratorEntry<K, V> iteratorEntry) {
while (iteratorEntry.iterator().hasNext()) {
K key = iteratorEntry.iterator().peekNextKey();
// Log.tracev("{0} Advance peeked: {1}-{2}", _name, iteratorEntry, key);
Log.tracev("{0} Advance peeked: {1}-{2}", _name, iteratorEntry, key);
MutableObject<IteratorEntry<K, V>> mutableBoolean = new MutableObject<>(null);
@@ -116,7 +126,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
});
if (newVal != iteratorEntry) {
// Log.tracev("{0} Skipped: {1}", _name, iteratorEntry.iterator().peekNextKey());
Log.tracev("{0} Skipped: {1}", _name, iteratorEntry.iterator().peekNextKey());
iteratorEntry.iterator().skip();
continue;
}
@@ -132,7 +142,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
@Override
protected void reverse() {
var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
// Log.tracev("{0} Reversing from {1}", _name, cur);
Log.tracev("{0} Reversing from {1}", _name, cur);
_goingForward = !_goingForward;
_sortedIterators.clear();
for (IteratorEntry<K, V> iterator : _iterators) {
@@ -149,6 +159,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|| (!_goingForward && peekImpl().compareTo(cur.getKey()) >= 0))) {
skipImpl();
}
Log.tracev("{0} Reversed to {1}", _name, _sortedIterators);
}
@Override
@@ -166,7 +177,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
}
cur.getValue().iterator().skip();
advanceIterator(cur.getValue());
// Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators);
Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators);
}
@Override
@@ -175,7 +186,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
}
@Override
protected Pair<K, V> nextImpl() {
protected Pair<K, MaybeTombstone<V>> nextImpl() {
var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
if (cur == null) {
throw new NoSuchElementException();
@@ -196,14 +207,22 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
@Override
public String toString() {
return "MergingKvIterator{" +
"_name='" + _name + '\'' +
", _sortedIterators=" + _sortedIterators.keySet() +
", _iterators=" + _iterators +
'}';
}
private record IteratorEntry<K extends Comparable<K>, V>(int priority, CloseableKvIterator<K, V> iterator) {
public IteratorEntry<K, V> reversed() {
return new IteratorEntry<>(priority, iterator.reversed());
}
private interface FirstMatchState<K extends Comparable<K>, V> {
}
private record FirstMatchNone<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
}
private record FirstMatchFound<K extends Comparable<K>, V>(
CloseableKvIterator<K, V> iterator) implements FirstMatchState<K, V> {
}
private record FirstMatchConsumed<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
}
}
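
On the rewritten merging iterator above: each source gets a priority equal to its position in the list, and when several sources sit on the same key, the earliest iterator (lowest priority number) supplies the value while the rest skip past it; that is what lets an in-memory overlay shadow the layers underneath. A usage sketch over plain TreeMaps (a guess at the call shape based only on these hunks):

var overlay = new TreeMap<>(Map.of("a", "new-a"));
var base = new TreeMap<>(Map.of("a", "old-a", "b", "b"));

var it = new MergingKvIterator<String, String>(
        "demo", IteratorStart.GE, "a",
        List.of(
                (start, key) -> new NavigableMapKvIterator<>(overlay, start, key),
                (start, key) -> new NavigableMapKvIterator<>(base, start, key)));
// it yields a -> Data[new-a] (the overlay wins), then b -> Data[b]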

View File

@@ -9,22 +9,22 @@ public class NavigableMapKvIterator<K extends Comparable<K>, V> extends Reversib
private Iterator<Map.Entry<K, V>> _iterator;
private Map.Entry<K, V> _next;
public NavigableMapKvIterator(NavigableMap<K, ? extends V> map, IteratorStart start, K key) {
_map = (NavigableMap<K, V>) map;
public NavigableMapKvIterator(NavigableMap<K, V> map, IteratorStart start, K key) {
_map = map;
SortedMap<K, V> _view;
_goingForward = true;
switch (start) {
case GE -> _view = _map.tailMap(key, true);
case GT -> _view = _map.tailMap(key, false);
case GE -> _view = map.tailMap(key, true);
case GT -> _view = map.tailMap(key, false);
case LE -> {
var floorKey = _map.floorKey(key);
var floorKey = map.floorKey(key);
if (floorKey == null) _view = _map;
else _view = _map.tailMap(floorKey, true);
else _view = map.tailMap(floorKey, true);
}
case LT -> {
var lowerKey = map.lowerKey(key);
if (lowerKey == null) _view = _map;
else _view = _map.tailMap(lowerKey, true);
else _view = map.tailMap(lowerKey, true);
}
default -> throw new IllegalArgumentException("Unknown start type");
}

View File

@@ -1,27 +1,28 @@
package com.usatiuk.objects.iterators;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Function;
public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final MergingKvIterator<K, MaybeTombstone<V>> _backing;
private Pair<K, V> _next = null;
public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
private final CloseableKvIterator<K, V> _backing;
private final Function<V, V_T> _transformer;
private Pair<K, V_T> _next = null;
private boolean _checkedNext = false;
public TombstoneSkippingIterator(IteratorStart start, K startKey, List<CloseableKvIterator<K, MaybeTombstone<V>>> iterators) {
public PredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<V, V_T> transformer) {
_goingForward = true;
_backing = new MergingKvIterator<>(start, startKey, iterators);
_backing = backing;
_transformer = transformer;
if (start == IteratorStart.GE || start == IteratorStart.GT)
return;
boolean shouldGoBack = false;
if (canHaveNext())
tryFillNext();
fillNext();
boolean shouldGoBack = false;
if (start == IteratorStart.LE) {
if (_next == null || _next.getKey().compareTo(startKey) > 0) {
shouldGoBack = true;
@@ -38,27 +39,34 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
_backing.skipPrev();
fillNext();
_goingForward = true;
if (_next != null)
_backing.skip();
_backing.skip();
fillNext();
}
switch (start) {
case LT -> {
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
}
case LE -> {
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
}
case GT -> {
assert _next == null || _next.getKey().compareTo(startKey) > 0;
}
case GE -> {
assert _next == null || _next.getKey().compareTo(startKey) >= 0;
}
}
}
private boolean canHaveNext() {
return (_goingForward ? _backing.hasNext() : _backing.hasPrev());
}
private boolean tryFillNext() {
var next = _goingForward ? _backing.next() : _backing.prev();
if (next.getValue() instanceof Tombstone<?>)
return false;
_next = Pair.of(next.getKey(), ((Data<V>) next.getValue()).value());
return true;
}
private void fillNext() {
while (_next == null && canHaveNext()) {
tryFillNext();
while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) {
var next = _goingForward ? _backing.next() : _backing.prev();
var transformed = _transformer.apply(next.getValue());
if (transformed == null)
continue;
_next = Pair.of(next.getKey(), transformed);
}
_checkedNext = true;
}
@@ -73,6 +81,9 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
else if (!_goingForward && !wasAtEnd)
_backing.skipPrev();
if (!wasAtEnd)
Log.tracev("Skipped in reverse: {0}", _next);
_next = null;
_checkedNext = false;
}
@@ -107,7 +118,7 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
}
@Override
protected Pair<K, V> nextImpl() {
protected Pair<K, V_T> nextImpl() {
if (!_checkedNext)
fillNext();
@@ -127,6 +138,7 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
@Override
public String toString() {
return "PredicateKvIterator{" +
"_backing=" + _backing +
", _next=" + _next +
'}';
}

View File

@@ -1,4 +1,10 @@
package com.usatiuk.objects.iterators;
public interface Tombstone<V> extends MaybeTombstone<V> {
import java.util.Optional;
public record Tombstone<V>() implements MaybeTombstone<V> {
@Override
public Optional<V> opt() {
return Optional.empty();
}
}
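
With the two records above, a layered store can represent deletion as data: an upper layer holding a Tombstone shadows live Data below it, and the merge/skip machinery drops such keys from iteration entirely. Concretely:

MaybeTombstone<String> live = new Data<>("hello"); // opt() -> Optional.of("hello")
MaybeTombstone<String> dead = new Tombstone<>();   // opt() -> Optional.empty()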

View File

@@ -1,4 +0,0 @@
package com.usatiuk.objects.iterators;
public record TombstoneImpl<V>() implements Tombstone<V> {
}

View File

@@ -0,0 +1,83 @@
package com.usatiuk.objects.iterators;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
public class TombstoneMergingKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
private final CloseableKvIterator<K, V> _backing;
private final String _name;
public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
_name = name;
_backing = new PredicateKvIterator<>(
new MergingKvIterator<>(name + "-merging", startType, startKey, iterators),
startType, startKey,
pair -> {
Log.tracev("{0} - Processing pair {1}", _name, pair);
if (pair instanceof Tombstone) {
return null;
}
return ((Data<V>) pair).value();
});
}
@SafeVarargs
public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
this(name, startType, startKey, List.of(iterators));
}
@Override
public K peekNextKey() {
return _backing.peekNextKey();
}
@Override
public void skip() {
_backing.skip();
}
@Override
public K peekPrevKey() {
return _backing.peekPrevKey();
}
@Override
public Pair<K, V> prev() {
return _backing.prev();
}
@Override
public boolean hasPrev() {
return _backing.hasPrev();
}
@Override
public void skipPrev() {
_backing.skipPrev();
}
@Override
public void close() {
_backing.close();
}
@Override
public boolean hasNext() {
return _backing.hasNext();
}
@Override
public Pair<K, V> next() {
return _backing.next();
}
@Override
public String toString() {
return "TombstoneMergingKvIterator{" +
"_backing=" + _backing +
", _name='" + _name + '\'' +
'}';
}
}

View File

@@ -1,22 +1,22 @@
package com.usatiuk.objects.snapshot;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IterProdFn;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MaybeTombstone;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Optional;
public interface Snapshot<K extends Comparable<K>, V> extends AutoCloseable {
List<CloseableKvIterator<K, MaybeTombstone<V>>> getIterator(IteratorStart start, K key);
public interface Snapshot<K extends Comparable<K>, V> extends AutoCloseableNoThrow {
IterProdFn<K, V> getIterator();
default CloseableKvIterator<K, V> getIterator(IteratorStart start, K key) {
return getIterator().get(start, key);
}
@Nonnull
Optional<V> readObject(K name);
long id();
@Override
void close();
}
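A toy Snapshot implementation over a frozen map, under the assumption that IterProdFn is essentially a (start, key) -> iterator factory as its uses in this diff suggest; the simplified stand-in types below are not the real interfaces.

import java.util.*;
import java.util.function.BiFunction;

enum Start { GE, GT, LE, LT }

interface SnapshotSketch<K extends Comparable<K>, V> extends AutoCloseable {
    BiFunction<Start, K, Iterator<Map.Entry<K, V>>> getIterator();
    Optional<V> readObject(K name);
    long id();
    @Override void close();
}

class MapSnapshot<K extends Comparable<K>, V> implements SnapshotSketch<K, V> {
    private final NavigableMap<K, V> frozen;
    private final long id;

    MapSnapshot(NavigableMap<K, V> source, long id) {
        // Copy once; from here on the snapshot is immune to writer activity.
        this.frozen = new TreeMap<>(source);
        this.id = id;
    }

    @Override
    public BiFunction<Start, K, Iterator<Map.Entry<K, V>>> getIterator() {
        return (start, key) -> switch (start) {
            case GE -> frozen.tailMap(key, true).entrySet().iterator();
            case GT -> frozen.tailMap(key, false).entrySet().iterator();
            case LE -> frozen.headMap(key, true).descendingMap().entrySet().iterator();
            case LT -> frozen.headMap(key, false).descendingMap().entrySet().iterator();
        };
    }

    @Override public Optional<V> readObject(K name) { return Optional.ofNullable(frozen.get(name)); }
    @Override public long id() { return id; }
    @Override public void close() { /* nothing to release in this toy version */ }
}

The switch from AutoCloseable to AutoCloseableNoThrow matters for callers: close() can no longer throw, so snapshots can be released in finally blocks without extra handling.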

View File

@@ -0,0 +1,29 @@
package com.usatiuk.objects.snapshot;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.stores.WritebackObjectPersistentStore;
import com.usatiuk.objects.transaction.TxRecord;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Optional;
import java.util.function.Consumer;
@Singleton
public class SnapshotManager {
@Inject
WritebackObjectPersistentStore writebackStore;
public Snapshot<JObjectKey, JDataVersionedWrapper> createSnapshot() {
return writebackStore.getSnapshot();
}
// This should not be called for the same objects concurrently
public Consumer<Runnable> commitTx(Collection<TxRecord.TxObjectRecord<?>> writes) {
return writebackStore.commitTx(writes);
}
}

View File

@@ -5,7 +5,6 @@ import com.usatiuk.objects.JDataVersionedWrapperLazy;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
@@ -17,20 +16,56 @@ import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.pcollections.TreePMap;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Stream;
@ApplicationScoped
public class CachingObjectPersistentStore {
private final AtomicReference<Cache> _cache;
@Inject
SerializingObjectPersistentStore delegate;
@ConfigProperty(name = "dhfs.objects.lru.print-stats")
boolean printStats;
private record Cache(TreePMap<JObjectKey, CacheEntry> map,
int size,
long version,
int sizeLimit) {
public Cache withPut(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
int objSize = obj.map(JDataVersionedWrapper::estimateSize).orElse(16);
int newSize = size() + objSize;
var entry = new CacheEntry(obj.<MaybeTombstone<JDataVersionedWrapper>>map(Data::new).orElse(new Tombstone<>()), objSize);
var old = map.get(key);
if (old != null)
newSize -= old.size();
TreePMap<JObjectKey, CacheEntry> newCache = map().plus(key, entry);
while (newSize > sizeLimit) {
var del = newCache.firstEntry();
newCache = newCache.minusFirstEntry();
newSize -= del.getValue().size();
}
return new Cache(
newCache,
newSize,
version,
sizeLimit
);
}
public Cache withVersion(long version) {
return new Cache(map, size, version, sizeLimit);
}
}
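The copy-on-write eviction above in miniature, using TreeMap copies in place of the persistent TreePMap; note that, as in the new withPut, the entry is inserted before the eviction loop runs, so an oversized put can evict the very entry it just added.

import java.util.*;

record BoundedCache(NavigableMap<String, Integer> map, int size, int sizeLimit) {
    BoundedCache withPut(String key, int entrySize) {
        NavigableMap<String, Integer> next = new TreeMap<>(map);
        Integer old = next.put(key, entrySize);
        int newSize = size + entrySize - (old == null ? 0 : old);
        while (newSize > sizeLimit && !next.isEmpty()) {
            // Evicts the lowest key, mirroring firstEntry()/minusFirstEntry()
            // above; this is eviction in key order, not true LRU.
            var evicted = next.pollFirstEntry();
            newSize -= evicted.getValue();
        }
        return new BoundedCache(next, newSize, sizeLimit);
    }
}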
private final AtomicReference<Cache> _cache;
private ExecutorService _commitExecutor;
private ExecutorService _statusExecutor;
private AtomicLong _cached = new AtomicLong();
private AtomicLong _cacheTries = new AtomicLong();
@@ -46,6 +81,7 @@ public class CachingObjectPersistentStore {
_cache.set(_cache.get().withVersion(s.id()));
}
_commitExecutor = Executors.newSingleThreadExecutor();
if (printStats) {
_statusExecutor = Executors.newSingleThreadExecutor();
_statusExecutor.submit(() -> {
@@ -66,6 +102,7 @@ public class CachingObjectPersistentStore {
Log.tracev("Committing: {0} writes, {1} deletes", objs.written().size(), objs.deleted().size());
var cache = _cache.get();
var commitFuture = _commitExecutor.submit(() -> delegate.prepareTx(objs, txId).run());
for (var write : objs.written()) {
cache = cache.withPut(write.getLeft(), Optional.of(write.getRight()));
}
@@ -73,7 +110,11 @@ public class CachingObjectPersistentStore {
cache = cache.withPut(del, Optional.empty());
}
cache = cache.withVersion(txId);
delegate.commitTx(objs, txId);
try {
commitFuture.get();
} catch (Exception e) {
throw new RuntimeException(e);
}
_cache.set(cache);
Log.tracev("Committed: {0} writes, {1} deletes", objs.written().size(), objs.deleted().size());
@@ -101,10 +142,10 @@ public class CachingObjectPersistentStore {
Snapshot<JObjectKey, JDataVersionedWrapper> finalBacking = backing;
Cache finalCurCache = curCache;
return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
private final Cache _curCache = finalCurCache;
private final Snapshot<JObjectKey, JDataVersionedWrapper> _backing = finalBacking;
private boolean _invalid = false;
private boolean _closed = false;
private final Cache _curCache = finalCurCache;
private final Snapshot<JObjectKey, JDataVersionedWrapper> _backing = finalBacking;
private void doCache(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
_cacheTries.incrementAndGet();
@@ -146,12 +187,43 @@ public class CachingObjectPersistentStore {
}
@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
return ListUtils.prependAndMap(
new NavigableMapKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>(_curCache.map(), start, key),
_backing.getIterator(start, key),
i -> new CachingKvIterator((CloseableKvIterator<JObjectKey, JDataVersionedWrapper>) (CloseableKvIterator<JObjectKey, ?>) i)
);
public IterProdFn<JObjectKey, JDataVersionedWrapper> getIterator() {
IterProdFn<JObjectKey, JDataVersionedWrapper> cacheItProdFn = new IterProdFn<JObjectKey, JDataVersionedWrapper>() {
@Override
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> get(IteratorStart start, JObjectKey key) {
throw new UnsupportedOperationException("Not implemented");
}
@Override
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getFlat(IteratorStart start, JObjectKey key) {
return Stream.of(
new MappingKvIterator<>(
new NavigableMapKvIterator<>(_curCache.map(), start, key),
e -> {
// Log.tracev("Taken from cache: {0}", e);
return e.object();
}
)
);
}
};
IterProdFn<JObjectKey, JDataVersionedWrapper> backingItProdFn = (mS, mK) -> new CachingKvIterator(_backing.getIterator(mS, mK));
return new IterProdFn<JObjectKey, JDataVersionedWrapper>() {
@Override
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> get(IteratorStart start, JObjectKey key) {
return new TombstoneMergingKvIterator<>("cache", start, key, cacheItProdFn, backingItProdFn);
}
@Override
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getFlat(IteratorStart start, JObjectKey key) {
return Stream.concat(
cacheItProdFn.getFlat(start, key),
backingItProdFn.getFlat(start, key)
);
}
};
}
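The flattening idea behind the get/getFlat pair (this commit's theme) in miniature: a composed layer concatenates the leaf iterators of its parts instead of wrapping an already-merged iterator, so the eventual merge happens once over all leaves rather than as a merge of merges. Layer below is a simplified stand-in for IterProdFn.

import java.util.*;
import java.util.stream.*;

interface Layer<T> {
    Stream<List<T>> getFlat(); // each inner list stands in for one leaf iterator
}

class FlatteningDemo {
    public static void main(String[] args) {
        Layer<String> cache = () -> Stream.of(List.of("cache-entry"));
        Layer<String> backing = () -> Stream.of(List.of("store-entry-1"), List.of("store-entry-2"));

        // Composition is concatenation, never nesting: the merging iterator
        // built on top sees all three leaves at the same level.
        Layer<String> combined = () -> Stream.concat(cache.getFlat(), backing.getFlat());
        System.out.println(combined.getFlat().count()); // 3 leaves, one merge
    }
}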
@Nonnull
@@ -159,12 +231,12 @@ public class CachingObjectPersistentStore {
public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
var cached = _curCache.map().get(name);
if (cached != null) {
return switch (cached) {
case CacheEntryPresent data -> Optional.of(data.value());
case CacheEntryMiss tombstone -> {
return switch (cached.object()) {
case Data<JDataVersionedWrapper> data -> Optional.of(data.value());
case Tombstone<JDataVersionedWrapper> tombstone -> {
yield Optional.empty();
}
default -> throw new IllegalStateException("Unexpected value: " + cached);
default -> throw new IllegalStateException("Unexpected value: " + cached.object());
};
}
var read = _backing.readObject(name);
@@ -183,7 +255,7 @@ public class CachingObjectPersistentStore {
_backing.close();
}
private class CachingKvIterator implements CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> {
private class CachingKvIterator implements CloseableKvIterator<JObjectKey, JDataVersionedWrapper> {
private final CloseableKvIterator<JObjectKey, JDataVersionedWrapper> _delegate;
private CachingKvIterator(CloseableKvIterator<JObjectKey, JDataVersionedWrapper> delegate) {
@@ -216,10 +288,10 @@ public class CachingObjectPersistentStore {
}
@Override
public Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> prev() {
public Pair<JObjectKey, JDataVersionedWrapper> prev() {
var prev = _delegate.prev();
maybeCache(prev.getKey(), Optional.of(prev.getValue()));
return (Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (Pair<JObjectKey, ?>) prev;
return prev;
}
@Override
@@ -233,10 +305,10 @@ public class CachingObjectPersistentStore {
}
@Override
public Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> next() {
public Pair<JObjectKey, JDataVersionedWrapper> next() {
var next = _delegate.next();
maybeCache(next.getKey(), Optional.of(next.getValue()));
return (Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (Pair<JObjectKey, ?>) next;
return next;
}
}
};
@@ -249,53 +321,6 @@ public class CachingObjectPersistentStore {
}
}
private interface CacheEntry extends MaybeTombstone<JDataVersionedWrapper> {
int size();
}
private record Cache(TreePMap<JObjectKey, CacheEntry> map,
int size,
long version,
int sizeLimit) {
public Cache withPut(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
var entry = obj.<CacheEntry>map(o -> new CacheEntryPresent(o, o.estimateSize())).orElse(new CacheEntryMiss());
int newSize = size() + entry.size();
var old = map.get(key);
if (old != null)
newSize -= old.size();
TreePMap<JObjectKey, CacheEntry> newCache = map();
while (newSize > sizeLimit) {
var del = newCache.firstEntry();
newCache = newCache.minusFirstEntry();
newSize -= del.getValue().size();
}
newCache = newCache.plus(key, entry);
return new Cache(
newCache,
newSize,
version,
sizeLimit
);
}
public Cache withVersion(long version) {
return new Cache(map, size, version, sizeLimit);
}
}
private record CacheEntryPresent(JDataVersionedWrapper value,
int size) implements CacheEntry, Data<JDataVersionedWrapper> {
}
private record CacheEntryMiss() implements CacheEntry, Tombstone<JDataVersionedWrapper> {
@Override
public int size() {
return 64;
}
private record CacheEntry(MaybeTombstone<JDataVersionedWrapper> object, int size) {
}
}

View File

@@ -1,9 +1,15 @@
package com.usatiuk.objects.stores;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.utils.RefcountedCloseable;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyMax;
import com.usatiuk.objects.JObjectKeyMin;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.iterators.IterProdFn;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.KeyPredicateKvIterator;
import com.usatiuk.objects.iterators.ReversibleKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import io.quarkus.arc.properties.IfBuildProperty;
import io.quarkus.logging.Log;
@@ -23,10 +29,8 @@ import java.lang.ref.Cleaner;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.stream.Stream;
import static org.lmdbjava.DbiFlags.MDB_CREATE;
import static org.lmdbjava.Env.create;
@@ -38,9 +42,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
private static final String DB_VER_OBJ_NAME_STR = "__DB_VER_OBJ";
private static final ByteBuffer DB_VER_OBJ_NAME;
@ConfigProperty(name = "dhfs.objects.persistence.lmdb.size", defaultValue = "1000000000000")
long lmdbSize;
static {
byte[] tmp = DB_VER_OBJ_NAME_STR.getBytes(StandardCharsets.ISO_8859_1);
var bb = ByteBuffer.allocateDirect(tmp.length);
@@ -64,7 +65,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
_root.toFile().mkdirs();
}
_env = create()
.setMapSize(lmdbSize)
.setMapSize(1_000_000_000_000L)
.setMaxDbs(1)
.open(_root.toFile(), EnvFlags.MDB_NOTLS);
_db = _env.openDbi(DB_NAME, MDB_CREATE);
@@ -100,54 +101,60 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
if (!_ready) throw new IllegalStateException("Wrong service order!");
}
@Nonnull
@Override
public Snapshot<JObjectKey, ByteBuffer> getSnapshot() {
var txn = _env.txnRead();
try {
long commitId = readTxId(txn).orElseThrow();
return new Snapshot<JObjectKey, ByteBuffer>() {
private final Txn<ByteBuffer> _txn = txn;
private final long _id = commitId;
private boolean _closed = false;
@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>>> getIterator(IteratorStart start, JObjectKey key) {
assert !_closed;
return List.of(new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn, start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR)));
}
@Nonnull
@Override
public Optional<ByteBuffer> readObject(JObjectKey name) {
assert !_closed;
var got = _db.get(_txn, name.toByteBuffer());
var ret = Optional.ofNullable(got).map(ByteBuffer::asReadOnlyBuffer);
return ret;
}
@Override
public long id() {
assert !_closed;
return _id;
}
@Override
public void close() {
assert !_closed;
_closed = true;
_txn.close();
}
};
} catch (Exception e) {
txn.close();
throw e;
public Optional<ByteString> readObject(JObjectKey name) {
verifyReady();
try (Txn<ByteBuffer> txn = _env.txnRead()) {
var value = _db.get(txn, name.toByteBuffer());
return Optional.ofNullable(value).map(ByteString::copyFrom);
}
}
@Override
public void commitTx(TxManifestRaw names, long txId) {
public Snapshot<JObjectKey, ByteString> getSnapshot() {
var txn = new RefcountedCloseable<>(_env.txnRead());
long commitId = readTxId(txn.get()).orElseThrow();
return new Snapshot<JObjectKey, ByteString>() {
private final RefcountedCloseable<Txn<ByteBuffer>> _txn = txn;
private final long _id = commitId;
private boolean _closed = false;
@Override
public IterProdFn<JObjectKey, ByteString> getIterator() {
assert !_closed;
return (start, key) -> new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn.ref(), start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR));
}
@Nonnull
@Override
public Optional<ByteString> readObject(JObjectKey name) {
assert !_closed;
var got = _db.get(_txn.get(), name.toByteBuffer());
var ret = Optional.ofNullable(got).map(UnsafeByteOperations::unsafeWrap);
return ret;
}
@Override
public long id() {
assert !_closed;
return _id;
}
@Override
public void close() {
assert !_closed;
_closed = true;
_txn.unref();
}
};
}
@Override
public Runnable prepareTx(TxManifestRaw names, long txId) {
verifyReady();
try (var txn = _env.txnWrite()) {
var txn = _env.txnWrite();
try {
for (var written : names.written()) {
var putBb = _db.reserve(txn, written.getKey().toByteBuffer(), written.getValue().size());
written.getValue().copyTo(putBb);
@@ -162,8 +169,17 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
bbData.putLong(txId);
bbData.flip();
_db.put(txn, DB_VER_OBJ_NAME.asReadOnlyBuffer(), bbData);
txn.commit();
} catch (Throwable t) {
txn.close();
throw t;
}
return () -> {
try {
txn.commit();
} finally {
txn.close();
}
};
}
@Override
@@ -178,30 +194,35 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
return _root.toFile().getFreeSpace();
}
private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>> {
@Override
public long getUsableSpace() {
verifyReady();
return _root.toFile().getUsableSpace();
}
private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, ByteString> {
private static final Cleaner CLEANER = Cleaner.create();
private final Txn<ByteBuffer> _txn; // Managed by the snapshot
private final RefcountedCloseable<Txn<ByteBuffer>> _txn;
private final Cursor<ByteBuffer> _cursor;
private final MutableObject<Boolean> _closed = new MutableObject<>(false);
// private final Exception _allocationStacktrace = new Exception();
// private final Exception _allocationStacktrace = null;
private final Exception _allocationStacktrace = null;
private boolean _hasNext = false;
private JObjectKey _peekedNextKey = null;
LmdbKvIterator(Txn<ByteBuffer> txn, IteratorStart start, JObjectKey key) {
LmdbKvIterator(RefcountedCloseable<Txn<ByteBuffer>> txn, IteratorStart start, JObjectKey key) {
_txn = txn;
_goingForward = true;
_cursor = _db.openCursor(_txn);
_cursor = _db.openCursor(_txn.get());
var closedRef = _closed;
// var bt = _allocationStacktrace;
// CLEANER.register(this, () -> {
// if (!closedRef.getValue()) {
// Log.error("Iterator was not closed before GC, allocated at: {0}", bt);
// System.exit(-1);
// }
// });
var bt = _allocationStacktrace;
CLEANER.register(this, () -> {
if (!closedRef.getValue()) {
Log.error("Iterator was not closed before GC, allocated at: {0}", bt);
System.exit(-1);
}
});
verifyReady();
@@ -256,24 +277,24 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
}
// var realGot = JObjectKey.fromByteBuffer(_cursor.key());
// _cursor.key().flip();
//
// switch (start) {
// case LT -> {
//// assert !_hasNext || realGot.compareTo(key) < 0;
// }
// case LE -> {
//// assert !_hasNext || realGot.compareTo(key) <= 0;
// }
// case GT -> {
// assert !_hasNext || realGot.compareTo(key) > 0;
// }
// case GE -> {
// assert !_hasNext || realGot.compareTo(key) >= 0;
// }
// }
// Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
var realGot = JObjectKey.fromByteBuffer(_cursor.key());
_cursor.key().flip();
switch (start) {
case LT -> {
// assert !_hasNext || realGot.compareTo(key) < 0;
}
case LE -> {
// assert !_hasNext || realGot.compareTo(key) <= 0;
}
case GT -> {
assert !_hasNext || realGot.compareTo(key) > 0;
}
case GE -> {
assert !_hasNext || realGot.compareTo(key) >= 0;
}
}
Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
}
@Override
@@ -283,6 +304,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
_closed.setValue(true);
_cursor.close();
_txn.unref();
}
@Override
@@ -301,7 +323,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
}
_goingForward = !_goingForward;
_peekedNextKey = null;
}
@Override
@@ -309,12 +330,8 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
if (_peekedNextKey != null) {
return _peekedNextKey;
}
var ret = JObjectKey.fromByteBuffer(_cursor.key());
_cursor.key().flip();
_peekedNextKey = ret;
return ret;
}
@@ -324,7 +341,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
_hasNext = _cursor.next();
else
_hasNext = _cursor.prev();
_peekedNextKey = null;
}
@Override
@@ -333,19 +349,19 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
@Override
protected Pair<JObjectKey, MaybeTombstone<ByteBuffer>> nextImpl() {
protected Pair<JObjectKey, ByteString> nextImpl() {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
// TODO: Right now with java serialization it doesn't matter, it's all copied to arrays anyway
var val = _cursor.val();
Pair<JObjectKey, MaybeTombstone<ByteBuffer>> ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), new DataWrapper<>(val.asReadOnlyBuffer()));
var bs = UnsafeByteOperations.unsafeWrap(val);
var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), bs);
if (_goingForward)
_hasNext = _cursor.next();
else
_hasNext = _cursor.prev();
// Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
_peekedNextKey = null;
Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
return ret;
}
}
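A small refcounting wrapper in the spirit of the RefcountedCloseable used above, assuming ref()/unref()/get() semantics: the wrapped resource is closed exactly once, when the last holder unrefs. This is what lets the snapshot and every LmdbKvIterator share one LMDB read transaction safely, whichever of them closes last.

import java.util.concurrent.atomic.AtomicInteger;

class Refcounted<T extends AutoCloseable> {
    private final T inner;
    private final AtomicInteger refs = new AtomicInteger(1);

    Refcounted(T inner) { this.inner = inner; }

    T get() { return inner; }

    Refcounted<T> ref() {
        if (refs.getAndIncrement() <= 0)
            throw new IllegalStateException("resource already closed");
        return this;
    }

    void unref() {
        if (refs.decrementAndGet() == 0) {
            try { inner.close(); } catch (Exception e) { throw new RuntimeException(e); }
        }
    }
}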

View File

@@ -2,18 +2,16 @@ package com.usatiuk.objects.stores;
import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.iterators.IterProdFn;
import com.usatiuk.objects.iterators.NavigableMapKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import io.quarkus.arc.properties.IfBuildProperty;
import jakarta.enterprise.context.ApplicationScoped;
import org.pcollections.TreePMap;
import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Stream;
@ApplicationScoped
@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory")
@@ -22,22 +20,30 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
private TreePMap<JObjectKey, ByteString> _objects = TreePMap.empty();
private long _lastCommitId = 0;
@Nonnull
@Override
public Snapshot<JObjectKey, ByteBuffer> getSnapshot() {
public Optional<ByteString> readObject(JObjectKey name) {
synchronized (this) {
return new Snapshot<JObjectKey, ByteBuffer>() {
return Optional.ofNullable(_objects.get(name));
}
}
@Override
public Snapshot<JObjectKey, ByteString> getSnapshot() {
synchronized (this) {
return new Snapshot<JObjectKey, ByteString>() {
private final TreePMap<JObjectKey, ByteString> _objects = MemoryObjectPersistentStore.this._objects;
private final long _lastCommitId = MemoryObjectPersistentStore.this._lastCommitId;
@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>>> getIterator(IteratorStart start, JObjectKey key) {
return List.of(new MappingKvIterator<>(new NavigableMapKvIterator<>(_objects, start, key), s -> new DataWrapper<>(s.asReadOnlyByteBuffer())));
public IterProdFn<JObjectKey, ByteString> getIterator() {
return (start, key) -> new NavigableMapKvIterator<>(_objects, start, key);
}
@Nonnull
@Override
public Optional<ByteBuffer> readObject(JObjectKey name) {
return Optional.ofNullable(_objects.get(name)).map(ByteString::asReadOnlyByteBuffer);
public Optional<ByteString> readObject(JObjectKey name) {
return Optional.ofNullable(_objects.get(name));
}
@Override
@@ -53,18 +59,19 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
}
}
@Override
public void commitTx(TxManifestRaw names, long txId) {
synchronized (this) {
for (var written : names.written()) {
_objects = _objects.plus(written.getKey(), written.getValue());
public Runnable prepareTx(TxManifestRaw names, long txId) {
return () -> {
synchronized (this) {
for (var written : names.written()) {
_objects = _objects.plus(written.getKey(), written.getValue());
}
for (JObjectKey key : names.deleted()) {
_objects = _objects.minus(key);
}
assert txId > _lastCommitId;
_lastCommitId = txId;
}
for (JObjectKey key : names.deleted()) {
_objects = _objects.minus(key);
}
assert txId > _lastCommitId;
_lastCommitId = txId;
}
};
}
@Override
@@ -76,4 +83,9 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
public long getFreeSpace() {
return 0;
}
@Override
public long getUsableSpace() {
return 0;
}
}

View File

@@ -2,20 +2,27 @@ package com.usatiuk.objects.stores;
import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.snapshot.Snapshot;
import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.Optional;
import java.util.function.Consumer;
// Persistent storage of objects
// All changes are written as sequential transactions
public interface ObjectPersistentStore {
Snapshot<JObjectKey, ByteBuffer> getSnapshot();
@Nonnull
Optional<ByteString> readObject(JObjectKey name);
void commitTx(TxManifestRaw names, long txId);
Snapshot<JObjectKey, ByteString> getSnapshot();
Runnable prepareTx(TxManifestRaw names, long txId);
long getTotalSpace();
long getFreeSpace();
long getUsableSpace();
}
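The prepare/commit split in this interface, in miniature: prepareTx does all the fallible staging work up front and hands back a Runnable whose only job is to publish the already-staged changes. Types are simplified stand-ins; the real manifest carries written and deleted entries.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ToyStore {
    private final Map<String, String> committed = new ConcurrentHashMap<>();

    Runnable prepareTx(Map<String, String> writes, long txId) {
        // Validate and stage here, where failing is still recoverable...
        Map<String, String> staged = Map.copyOf(writes);
        // ...and return a publish step that should not reasonably throw.
        return () -> committed.putAll(staged);
    }
}

This split is what lets the caching store above ship delegate.prepareTx(objs, txId).run() to its commit executor while it rebuilds the cache concurrently.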

View File

@@ -1,9 +1,6 @@
package com.usatiuk.objects.stores;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.Tombstone;
public record PendingDelete(JObjectKey key,
long bundleId) implements PendingWriteEntry, Tombstone<JDataVersionedWrapper> {
public record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry {
}

View File

@@ -1,8 +1,6 @@
package com.usatiuk.objects.stores;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.iterators.Data;
public record PendingWrite(JDataVersionedWrapper value,
long bundleId) implements PendingWriteEntry, Data<JDataVersionedWrapper> {
public record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry {
}

View File

@@ -1,8 +1,5 @@
package com.usatiuk.objects.stores;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.iterators.MaybeTombstone;
public interface PendingWriteEntry extends MaybeTombstone<JDataVersionedWrapper> {
public interface PendingWriteEntry {
long bundleId();
}
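A sketch of the overlay these pending-write records feed, with simplified stand-in types: a read consults the pending map first and only falls through to the store on a miss, so not-yet-flushed writes and deletes are visible immediately.

import java.util.*;

sealed interface PendingEntrySketch permits PutSketch, DelSketch {
    long bundleId();
}

record PutSketch(String value, long bundleId) implements PendingEntrySketch {}

record DelSketch(long bundleId) implements PendingEntrySketch {}

class OverlayReader {
    static Optional<String> read(Map<String, PendingEntrySketch> pending,
                                 Map<String, String> store, String key) {
        PendingEntrySketch e = pending.get(key);
        if (e == null) return Optional.ofNullable(store.get(key)); // miss: go down a layer
        return switch (e) {
            case PutSketch p -> Optional.of(p.value()); // not yet flushed, still visible
            case DelSketch d -> Optional.empty();       // deleted ahead of the store
        };
    }
}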

View File

@@ -1,38 +1,39 @@
package com.usatiuk.objects.stores;
import com.google.protobuf.ByteString;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JDataVersionedWrapperSerializer;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.ObjectSerializer;
import com.usatiuk.objects.iterators.IterProdFn;
import com.usatiuk.objects.iterators.MappingKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.utils.ListUtils;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;
@ApplicationScoped
public class SerializingObjectPersistentStore {
@Inject
JDataVersionedWrapperSerializer serializer;
ObjectSerializer<JDataVersionedWrapper> serializer;
@Inject
ObjectPersistentStore delegateStore;
@Nonnull
Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
return delegateStore.readObject(name).map(serializer::deserialize);
}
public Snapshot<JObjectKey, JDataVersionedWrapper> getSnapshot() {
return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
private final Snapshot<JObjectKey, ByteBuffer> _backing = delegateStore.getSnapshot();
private final Snapshot<JObjectKey, ByteString> _backing = delegateStore.getSnapshot();
@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
return ListUtils.map(_backing.getIterator(start, key),
i -> new MappingKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>, MaybeTombstone<JDataVersionedWrapper>>(i,
d -> serializer.deserialize(((DataWrapper<ByteBuffer>) d).value())));
public IterProdFn<JObjectKey, JDataVersionedWrapper> getIterator() {
return (start, key) -> new MappingKvIterator<>(_backing.getIterator(start, key), d -> serializer.deserialize(d));
}
@Nonnull
@@ -62,7 +63,7 @@ public class SerializingObjectPersistentStore {
, objs.deleted());
}
void commitTx(TxManifestObj<? extends JDataVersionedWrapper> objects, long txId) {
delegateStore.commitTx(prepareManifest(objects), txId);
Runnable prepareTx(TxManifestObj<? extends JDataVersionedWrapper> objects, long txId) {
return delegateStore.prepareTx(prepareManifest(objects), txId);
}
}
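The deserializing view above in miniature: a MappingKvIterator-style adapter that turns a byte-level iterator into an object-level one entry by entry, so deserialization cost is only paid for entries actually consumed. The generic adapter below is illustrative, not the real class.

import java.util.Iterator;
import java.util.Map;
import java.util.function.Function;

class MappingIterator<K, A, B> implements Iterator<Map.Entry<K, B>> {
    private final Iterator<Map.Entry<K, A>> backing;
    private final Function<A, B> fn;

    MappingIterator(Iterator<Map.Entry<K, A>> backing, Function<A, B> fn) {
        this.backing = backing;
        this.fn = fn;
    }

    @Override public boolean hasNext() { return backing.hasNext(); }

    @Override public Map.Entry<K, B> next() {
        var e = backing.next();
        return Map.entry(e.getKey(), fn.apply(e.getValue())); // deserialize on demand
    }
}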

View File

@@ -3,13 +3,10 @@ package com.usatiuk.objects.stores;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JDataVersionedWrapperImpl;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MaybeTombstone;
import com.usatiuk.objects.iterators.NavigableMapKvIterator;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.transaction.TxCommitException;
import com.usatiuk.objects.transaction.TxRecord;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
@@ -29,38 +26,34 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.stream.Stream;
@ApplicationScoped
public class WritebackObjectPersistentStore {
@Inject
CachingObjectPersistentStore cachedStore;
@Inject
ExecutorService _callbackExecutor;
private final LinkedList<TxBundle> _pendingBundles = new LinkedList<>();
private final LinkedHashMap<Long, TxBundle> _notFlushedBundles = new LinkedHashMap<>();
@ConfigProperty(name = "dhfs.objects.writeback.limit")
int sizeLimit;
private TxBundle _pendingBundle = null;
private int _curSize = 0;
private record PendingWriteData(TreePMap<JObjectKey, PendingWriteEntry> pendingWrites,
long lastFlushedId,
long lastCommittedId) {
}
private final AtomicReference<PendingWriteData> _pendingWrites = new AtomicReference<>(null);
private final ReentrantLock _pendingBundleLock = new ReentrantLock();
private final Object _flushWaitSynchronizer = new Object();
private final Condition _newBundleCondition = _pendingBundleLock.newCondition();
private final Condition _flushCondition = _pendingBundleLock.newCondition();
private final AtomicLong _lastFlushedId = new AtomicLong(-1);
private final AtomicLong _lastCommittedId = new AtomicLong(-1);
private final AtomicLong _lastWrittenId = new AtomicLong(-1);
private final AtomicLong _lastCommittedId = new AtomicLong();
private final AtomicLong _waitedTotal = new AtomicLong(0);
@Inject
CachingObjectPersistentStore cachedStore;
@ConfigProperty(name = "dhfs.objects.writeback.limit")
long sizeLimit;
private long currentSize = 0;
private ExecutorService _writebackExecutor;
private ExecutorService _statusExecutor;
private volatile boolean _ready = false;
void init(@Observes @Priority(120) StartupEvent event) {
@@ -78,8 +71,8 @@ public class WritebackObjectPersistentStore {
try {
while (true) {
Thread.sleep(1000);
if (_curSize > 0)
Log.info("Tx commit status: size=" + _curSize / 1024 / 1024 + "MB");
if (currentSize > 0)
Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB");
}
} catch (InterruptedException ignored) {
}
@@ -89,7 +82,6 @@ public class WritebackObjectPersistentStore {
lastTxId = s.id();
}
_lastCommittedId.set(lastTxId);
_lastFlushedId.set(lastTxId);
_pendingWrites.set(new PendingWriteData(TreePMap.empty(), lastTxId, lastTxId));
_ready = true;
}
@@ -97,14 +89,11 @@ public class WritebackObjectPersistentStore {
void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException {
Log.info("Waiting for all transactions to drain");
_ready = false;
_pendingBundleLock.lock();
try {
while (_curSize > 0) {
_flushCondition.await();
synchronized (_flushWaitSynchronizer) {
_ready = false;
while (currentSize > 0) {
_flushWaitSynchronizer.wait();
}
} finally {
_pendingBundleLock.unlock();
}
_writebackExecutor.shutdownNow();
@@ -118,19 +107,21 @@ public class WritebackObjectPersistentStore {
private void writeback() {
while (!Thread.interrupted()) {
try {
TxBundle bundle;
_pendingBundleLock.lock();
try {
while (_pendingBundle == null)
_newBundleCondition.await();
bundle = _pendingBundle;
_pendingBundle = null;
TxBundle bundle = new TxBundle(0);
synchronized (_pendingBundles) {
while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready)
_pendingBundles.wait();
_curSize -= bundle.size();
assert _curSize == 0;
_flushCondition.signal();
} finally {
_pendingBundleLock.unlock();
long diff = 0;
while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var toCompress = _pendingBundles.poll();
diff -= toCompress.size();
bundle.compress(toCompress);
}
diff += bundle.size();
synchronized (_flushWaitSynchronizer) {
currentSize += diff;
}
}
var toWrite = new ArrayList<Pair<JObjectKey, JDataVersionedWrapper>>();
@@ -150,12 +141,15 @@ public class WritebackObjectPersistentStore {
}
}
cachedStore.commitTx(new TxManifestObj<>(toWrite, toDelete), bundle.id());
cachedStore.commitTx(
new TxManifestObj<>(
Collections.unmodifiableList(toWrite),
Collections.unmodifiableList(toDelete)
), bundle.id());
Log.tracev("Bundle {0} committed", bundle.id());
_pendingBundleLock.lock();
try {
while (true) {
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var e : bundle._entries.values()) {
@@ -168,16 +162,25 @@ public class WritebackObjectPersistentStore {
bundle.id(),
curPw.lastCommittedId()
);
_pendingWrites.compareAndSet(curPw, newCurPw);
} finally {
_pendingBundleLock.unlock();
if (_pendingWrites.compareAndSet(curPw, newCurPw))
break;
}
_lastFlushedId.set(bundle.id());
var callbacks = bundle.callbacks();
_callbackExecutor.submit(() -> {
callbacks.forEach(Runnable::run);
});
List<List<Runnable>> callbacks = new ArrayList<>();
synchronized (_notFlushedBundles) {
_lastWrittenId.set(bundle.id());
while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.id()) {
callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted());
}
}
callbacks.forEach(l -> l.forEach(Runnable::run));
synchronized (_flushWaitSynchronizer) {
currentSize -= bundle.size();
// FIXME:
if (currentSize <= sizeLimit || !_ready)
_flushWaitSynchronizer.notifyAll();
}
} catch (InterruptedException ignored) {
} catch (Exception e) {
Log.error("Uncaught exception in writeback", e);
@@ -188,99 +191,125 @@ public class WritebackObjectPersistentStore {
Log.info("Writeback thread exiting");
}
private long commitBundle(Collection<TxRecord.TxObjectRecord<?>> writes) {
public long commitBundle(Collection<TxRecord.TxObjectRecord<?>> writes) {
verifyReady();
_pendingBundleLock.lock();
try {
boolean shouldWake = false;
if (_curSize > sizeLimit) {
shouldWake = true;
long started = System.currentTimeMillis();
while (_curSize > sizeLimit)
_flushCondition.await();
long waited = System.currentTimeMillis() - started;
_waitedTotal.addAndGet(waited);
if (Log.isTraceEnabled())
Log.tracev("Thread {0} waited for tx bundle for {1} ms", Thread.currentThread().getName(), waited);
}
var oursId = _lastCommittedId.incrementAndGet();
var curBundle = _pendingBundle;
int oldSize = 0;
if (curBundle != null) {
oldSize = curBundle.size();
curBundle.setId(oursId);
} else {
curBundle = new TxBundle(oursId);
}
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var action : writes) {
var key = action.key();
switch (action) {
case TxRecord.TxObjectRecordWrite<?> write -> {
// Log.tracev("Flushing object {0}", write.key());
var wrapper = new JDataVersionedWrapperImpl(write.data(), oursId);
curPwMap = curPwMap.plus(key, new PendingWrite(wrapper, oursId));
curBundle.commit(wrapper);
}
case TxRecord.TxObjectRecordDeleted deleted -> {
// Log.tracev("Deleting object {0}", deleted.key());
curPwMap = curPwMap.plus(key, new PendingDelete(key, oursId));
curBundle.delete(key);
boolean wait = false;
while (true) {
if (wait) {
synchronized (_flushWaitSynchronizer) {
long started = System.currentTimeMillis();
while (currentSize > sizeLimit) {
try {
_flushWaitSynchronizer.wait();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
long waited = System.currentTimeMillis() - started;
_waitedTotal.addAndGet(waited);
if (Log.isTraceEnabled())
Log.tracev("Thread {0} waited for tx bundle for {1} ms", Thread.currentThread().getName(), waited);
wait = false;
}
}
// Now, make the changes visible to new iterators
var newCurPw = new PendingWriteData(
curPwMap,
curPw.lastFlushedId(),
oursId
);
_pendingWrites.compareAndSet(curPw, newCurPw);
synchronized (_pendingBundles) {
synchronized (_flushWaitSynchronizer) {
if (currentSize > sizeLimit) {
if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var target = _pendingBundles.poll();
_pendingBundle = curBundle;
_newBundleCondition.signalAll();
long diff = -target.size();
while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var toCompress = _pendingBundles.poll();
diff -= toCompress.size();
target.compress(toCompress);
}
diff += target.size();
currentSize += diff;
_pendingBundles.addFirst(target);
}
}
_curSize += (curBundle.size() - oldSize);
if (currentSize > sizeLimit) {
wait = true;
continue;
}
}
if (shouldWake && _curSize < sizeLimit) {
_flushCondition.signal();
TxBundle bundle;
synchronized (_notFlushedBundles) {
bundle = new TxBundle(_lastCommittedId.incrementAndGet());
_pendingBundles.addLast(bundle);
_notFlushedBundles.put(bundle.id(), bundle);
}
for (var action : writes) {
switch (action) {
case TxRecord.TxObjectRecordWrite<?> write -> {
Log.tracev("Flushing object {0}", write.key());
bundle.commit(new JDataVersionedWrapperImpl(write.data(), bundle.id()));
}
case TxRecord.TxObjectRecordDeleted deleted -> {
Log.tracev("Deleting object {0}", deleted.key());
bundle.delete(deleted.key());
}
default -> {
throw new TxCommitException("Unexpected value: " + action.key());
}
}
}
while (true) {
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var e : ((TxBundle) bundle)._entries.values()) {
switch (e) {
case TxBundle.CommittedEntry c -> {
curPwMap = curPwMap.plus(c.key(), new PendingWrite(c.data, bundle.id()));
}
case TxBundle.DeletedEntry d -> {
curPwMap = curPwMap.plus(d.key(), new PendingDelete(d.key, bundle.id()));
}
default -> throw new IllegalStateException("Unexpected value: " + e);
}
}
// Now, make the changes visible to new iterators
var newCurPw = new PendingWriteData(
curPwMap,
curPw.lastFlushedId(),
bundle.id()
);
if (!_pendingWrites.compareAndSet(curPw, newCurPw))
continue;
((TxBundle) bundle).setReady();
if (_pendingBundles.peek() == bundle)
_pendingBundles.notify();
synchronized (_flushWaitSynchronizer) {
currentSize += ((TxBundle) bundle).size();
}
return bundle.id();
}
}
return oursId;
} catch (InterruptedException e) {
throw new RuntimeException(e);
} finally {
_pendingBundleLock.unlock();
}
}
public void asyncFence(long bundleId, Runnable fn) {
verifyReady();
if (bundleId < 0) throw new IllegalArgumentException("bundleId should be >= 0!");
if (_lastFlushedId.get() >= bundleId) {
if (_lastWrittenId.get() >= bundleId) {
fn.run();
return;
}
_pendingBundleLock.lock();
try {
if (_lastFlushedId.get() >= bundleId) {
synchronized (_notFlushedBundles) {
if (_lastWrittenId.get() >= bundleId) {
fn.run();
return;
}
var pendingBundle = _pendingBundle;
if (pendingBundle == null) {
fn.run();
return;
}
pendingBundle.addCallback(fn);
} finally {
_pendingBundleLock.unlock();
_notFlushedBundles.get(bundleId).addCallback(fn);
}
}
@@ -321,8 +350,37 @@ public class WritebackObjectPersistentStore {
private final long txId = finalPw.lastCommittedId();
@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
return ListUtils.prepend(new NavigableMapKvIterator<>(_pendingWrites, start, key), _cache.getIterator(start, key));
public IterProdFn<JObjectKey, JDataVersionedWrapper> getIterator() {
IterProdFn<JObjectKey, JDataVersionedWrapper> cacheItProdFn = new IterProdFn<JObjectKey, JDataVersionedWrapper>() {
@Override
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> get(IteratorStart start, JObjectKey key) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getFlat(IteratorStart start, JObjectKey key) {
return Stream.of(new MappingKvIterator<>(
new NavigableMapKvIterator<>(_pendingWrites, start, key),
e -> switch (e) {
case PendingWrite pw -> new Data<>(pw.data());
case PendingDelete d -> new Tombstone<>();
default -> throw new IllegalStateException("Unexpected value: " + e);
}));
}
};
return new IterProdFn<JObjectKey, JDataVersionedWrapper>() {
@Override
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> get(IteratorStart start, JObjectKey key) {
return new TombstoneMergingKvIterator<>("writeback-ps", start, key,
cacheItProdFn, _cache.getIterator());
}
@Override
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getFlat(IteratorStart start, JObjectKey key) {
return Stream.concat(cacheItProdFn.getFlat(start, key), _cache.getIterator().getFlat(start, key));
}
};
}
@Nonnull
@@ -331,7 +389,7 @@ public class WritebackObjectPersistentStore {
var cached = _pendingWrites.get(name);
if (cached != null) {
return switch (cached) {
case PendingWrite c -> Optional.of(c.value());
case PendingWrite c -> Optional.of(c.data());
case PendingDelete d -> {
yield Optional.empty();
}
@@ -359,40 +417,41 @@ public class WritebackObjectPersistentStore {
}
}
private record PendingWriteData(TreePMap<JObjectKey, PendingWriteEntry> pendingWrites,
long lastFlushedId,
long lastCommittedId) {
public interface VerboseReadResult {
}
private static class TxBundle {
private final HashMap<JObjectKey, BundleEntry> _entries = new HashMap<>();
private final LinkedHashMap<JObjectKey, BundleEntry> _entries = new LinkedHashMap<>();
private final ArrayList<Runnable> _callbacks = new ArrayList<>();
private int _size = 0;
private long _txId;
ArrayList<Runnable> callbacks() {
return _callbacks;
}
private volatile boolean _ready = false;
private long _size = 0;
private boolean _wasCommitted = false;
private TxBundle(long txId) {
_txId = txId;
}
public void setId(long id) {
_txId = id;
}
public long id() {
return _txId;
}
public void setReady() {
_ready = true;
}
public void addCallback(Runnable callback) {
_callbacks.add(callback);
synchronized (_callbacks) {
if (_wasCommitted) throw new IllegalStateException();
_callbacks.add(callback);
}
}
public int size() {
return _size;
public List<Runnable> setCommitted() {
synchronized (_callbacks) {
_wasCommitted = true;
return Collections.unmodifiableList(_callbacks);
}
}
private void putEntry(BundleEntry entry) {
@@ -411,7 +470,28 @@ public class WritebackObjectPersistentStore {
putEntry(new DeletedEntry(obj));
}
private sealed interface BundleEntry permits CommittedEntry, DeletedEntry {
public long size() {
return _size;
}
public void compress(TxBundle other) {
if (_txId >= other._txId)
throw new IllegalArgumentException("Compressing an older bundle into newer");
_txId = other._txId;
for (var entry : other._entries.values()) {
putEntry(entry);
}
synchronized (_callbacks) {
assert !_wasCommitted;
assert !other._wasCommitted;
_callbacks.addAll(other._callbacks);
}
}
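Bundle compression in miniature: a newer bundle is folded into an older pending one so later writes to the same key win and both flush as a single transaction. LinkedHashMap.putAll gives exactly that last-write-wins merge; the String payloads below are stand-ins for the real entries.

import java.util.LinkedHashMap;

class BundleSketch {
    long txId;
    final LinkedHashMap<String, String> entries = new LinkedHashMap<>();

    BundleSketch(long txId) { this.txId = txId; }

    // Merge a newer bundle into this one: adopt its id and let its entries
    // overwrite ours, so one durable write covers both bundles.
    void compress(BundleSketch newer) {
        if (txId >= newer.txId)
            throw new IllegalArgumentException("can only fold a newer bundle into an older one");
        txId = newer.txId;
        entries.putAll(newer.entries);
    }
}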
private interface BundleEntry {
JObjectKey key();
int size();
@@ -429,4 +509,10 @@ public class WritebackObjectPersistentStore {
}
}
}
public record VerboseReadResultPersisted(Optional<JDataVersionedWrapper> data) implements VerboseReadResult {
}
public record VerboseReadResultPending(PendingWriteEntry pending) implements VerboseReadResult {
}
}
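asyncFence in miniature: run a callback once everything up to a given bundle id is durable, either immediately (already flushed) or by queueing it on the not-yet-flushed bundle, as the code above does with _notFlushedBundles. A simplified, single-lock sketch:

import java.util.*;

class FenceSketch {
    private final TreeMap<Long, List<Runnable>> notFlushed = new TreeMap<>();
    private long lastWrittenId = -1;

    synchronized void asyncFence(long bundleId, Runnable fn) {
        if (lastWrittenId >= bundleId) { fn.run(); return; }
        notFlushed.computeIfAbsent(bundleId, k -> new ArrayList<>()).add(fn);
    }

    // Called by the writeback thread after a bundle is durably committed.
    synchronized void onFlushed(long bundleId) {
        lastWrittenId = bundleId;
        var ready = notFlushed.headMap(bundleId, true);
        ready.values().forEach(callbacks -> callbacks.forEach(Runnable::run));
        ready.clear();
    }
}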

Some files were not shown because too many files have changed in this diff.