31 Commits

SHA1 Message Date
0849df60ae Dhfs-fs: remove DhfsFileService 2025-05-05 21:58:37 +02:00
9cb5c226f9 remove dhfs-app 2025-05-05 21:20:07 +02:00
87c404828c add powershell run scripts 2025-05-04 17:44:57 +02:00
b074e8eb44 Dhfs-fs: proper not found unlink exception 2025-05-04 17:13:43 +02:00
eb5b0ae03c cleanup run wrapper 2025-05-03 17:18:28 +02:00
c329c1f982 Objects: nested transactions 2025-05-03 13:57:44 +02:00
4e7b13227b Sync-base: "kick out" inactive peers 2025-05-03 13:14:06 +02:00
db51d7280c Revert "Sync-base: get rid of JDataRemotePush"
This reverts commit 07133a71
2025-05-03 11:25:23 +02:00
70fecb389b Objects: cleanup transaction put a little 2025-05-02 12:50:43 +02:00
6e9a2b25f6 Utils: cleanup UnsafeAccessor a little 2025-05-02 12:39:50 +02:00
b84ef95703 Revert "Objects: simplify tx commit hooks"
This reverts commit c0735801b9.
2025-05-01 13:42:06 +02:00
c0735801b9 Objects: simplify tx commit hooks 2025-05-01 13:27:39 +02:00
b506ced9d5 Objects: simplify WritebackObjectPersistentStore 2025-05-01 10:29:11 +02:00
46bc9fa810 Objects: remove transactionobject 2025-05-01 09:14:50 +02:00
8ab034402d Revert "Objects: simplify cache"
This reverts commit d94d11ec8b.
2025-04-29 20:36:19 +02:00
d94d11ec8b Objects: simplify cache 2025-04-29 20:22:21 +02:00
5beaad2d32 Objects: better iterators 2025-04-29 16:53:26 +02:00
c4484d21e5 Objects: simplify tx commit a little 2025-04-29 16:50:41 +02:00
2766ef1bae Add --enable-preview to run.xml and run 2025-04-29 12:49:03 +02:00
58de85c078 Sync-base: WritebackObjectPersistentStore cleanup 2025-04-29 12:46:39 +02:00
cc9da86440 Sync-base: JObjectKeyImpl import fix 2025-04-29 12:44:41 +02:00
e6c9e6aee9 Dhfs-fuse: implement write_buf for one less copy 2025-04-29 12:44:18 +02:00
62265355c4 Dhfs-fs: a little cleanup in DhfsFileServiceImpl 2025-04-29 12:44:00 +02:00
854bce1627 Utils: UninitializedByteBuffer 2025-04-29 12:43:18 +02:00
1b19c77bb6 Utils: slightly faster add in HashSetDelayedBlockingQueue 2025-04-29 12:40:45 +02:00
7aa968a569 Dhfs-fuse: fix import 2025-04-29 00:45:34 +02:00
e348c39be1 Utils: add UnsafeAccessor to JnrPtrByteOutput
oops
2025-04-28 23:50:31 +02:00
1b54830651 Objects: don't lock some objects twice for no reason 2025-04-28 23:49:45 +02:00
bc5f0b816c Objects: add putNew
to avoid searching for nonexistent objects
2025-04-28 23:47:53 +02:00
9ff914bdaa Utils: move UnsafeAccessor to utils 2025-04-28 23:36:42 +02:00
1cee6f62b8 Utils: less dumb DataLocker 2025-04-28 23:34:30 +02:00
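The putNew commit above (bc5f0b816c) shows up in the DhfsFileService diff below: createChunk now calls remoteTx.putDataNew instead of putData, asserting that the key is brand new so the transaction layer can skip looking up a previous version. A minimal sketch of that idea, assuming a plain map-backed store rather than the real Objects API:

import java.util.HashMap;

// Sketch only: "putNew" skips the existence lookup that a normal put performs.
final class TxStoreSketch {
    private final HashMap<String, Object> store = new HashMap<>();

    void put(String key, Object obj) {
        Object prev = store.get(key); // normal path: fetch any old version first
        // ... version/conflict bookkeeping against prev would happen here ...
        store.put(key, obj);
    }

    void putNew(String key, Object obj) {
        // caller guarantees the key did not exist, so the lookup is skipped entirely
        store.put(key, obj);
    }
}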
89 changed files with 1948 additions and 2828 deletions

View File

@@ -21,7 +21,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
with:
- submodules: 'recursive'
+ submodules: "recursive"
- name: Install sudo for ACT
run: apt-get update && apt-get install -y sudo
@@ -32,7 +32,7 @@ jobs:
- name: User allow other for fuse
run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf
- name: Dump fuse.conf
run: cat /etc/fuse.conf
@@ -55,7 +55,7 @@ jobs:
- uses: actions/upload-artifact@v4
with:
name: DHFS Server Package
- path: dhfs-parent/dhfs-app/target/quarkus-app
+ path: dhfs-parent/dhfs-fuse/target/quarkus-app
- uses: actions/upload-artifact@v4
if: ${{ always() }}
@@ -212,7 +212,7 @@ jobs:
run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app
- name: Copy DHFS
- run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"
+ run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"
- name: Copy Webui
run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"

View File

@@ -1,17 +1,16 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main"/>
<module name="dhfs-app"/>
<option name="VM_PARAMETERS"
value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011"/>
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*"/>
<option name="ENABLED" value="true"/>
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true"/>
</method>
</configuration>
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
</component>

View File

@@ -1,18 +1,16 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication"
nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main"/>
<module name="dhfs-app"/>
<option name="VM_PARAMETERS"
value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0"/>
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*"/>
<option name="ENABLED" value="true"/>
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true"/>
</method>
</configuration>
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
</component>

View File

@@ -1,5 +0,0 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*

View File

@@ -1,43 +0,0 @@
#Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml
# Eclipse
.project
.classpath
.settings/
bin/
# IntelliJ
.idea
*.ipr
*.iml
*.iws
# NetBeans
nb-configuration.xml
# Visual Studio Code
.vscode
.factorypath
# OSX
.DS_Store
# Vim
*.swp
*.swo
# patch
*.orig
*.rej
# Local environment
.env
# Plugin directory
/.quarkus/cli/plugins/

View File

@@ -1,2 +0,0 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl

View File

@@ -1,43 +0,0 @@
version: "3.2"
services:
dhfs1:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs1:/dhfs_root
- $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-jar /app/quarkus-run.jar"
ports:
- 8080:8080
- 8081:8443
- 5005:5005
dhfs2:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs2:/dhfs_root
- $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
--add-exports java.base/jdk.internal.access=ALL-UNNAMED
--add-opens=java.base/java.nio=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-jar /app/quarkus-run.jar"
ports:
- 8090:8080
- 8091:8443
- 5010:5010

View File

@@ -1,172 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-app</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>net.openhft</groupId>
<artifactId>zero-allocation-hashing</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fuse</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,97 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# If you want to include the debug port in your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
# "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

View File

@@ -1,93 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# If you want to include the debug port in your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
# "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

View File

@@ -1,27 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -1,30 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -1,34 +0,0 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.objects.periodic-push-op-interval=5m
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required

View File

@@ -1,29 +0,0 @@
package com.usatiuk.dhfsapp;
import io.quarkus.test.junit.QuarkusTestProfile;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
Path tempDirWithPrefix;
try {
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
} catch (IOException e) {
throw new RuntimeException(e);
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;
}
}
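For reference, a hypothetical test built on the profile above (class names and the extra override are invented; @QuarkusTest and @TestProfile are standard Quarkus test machinery):

import java.util.Map;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

// Hypothetical example: each run gets fresh temp dhfs roots from TempDataProfile.
class ExampleProfile extends TempDataProfile {
    @Override
    protected void getConfigOverrides(Map<String, String> toPut) {
        toPut.put("dhfs.fuse.enabled", "false"); // invented extra override for illustration
    }
}

@QuarkusTest
@TestProfile(ExampleProfile.class)
class ExampleTest {
    // test methods would go here; they see the per-run temp directories
}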

View File

@@ -1,44 +0,0 @@
package com.usatiuk.dhfsapp;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
@ApplicationScoped
public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;
public static void purgeDirectory(File dir) {
try {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
} catch (Exception e) {
Log.error("Couldn't purge directory " + dir, e);
}
}
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
} catch (Exception ignored) {
Log.warn("Couldn't cleanup test data on init");
}
}
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}
}

View File

@@ -1,11 +0,0 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true

View File

@@ -2,47 +2,645 @@ package com.usatiuk.dhfsfs.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.ChunkData;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.util.Optional;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.*;
import java.util.stream.StreamSupport;
public interface DhfsFileService {
Optional<JObjectKey> open(String name);
Optional<JObjectKey> create(String name, long mode);
Pair<String, JObjectKey> inoToParent(JObjectKey ino);
void mkdir(String name, long mode);
Optional<GetattrRes> getattr(JObjectKey name);
Boolean chmod(JObjectKey name, long mode);
void unlink(String name);
Boolean rename(String from, String to);
Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs);
Iterable<String> readDir(String name);
long size(JObjectKey fileUuid);
ByteString read(JObjectKey fileUuid, long offset, int length);
Long write(JObjectKey fileUuid, long offset, ByteString data);
default Long write(JObjectKey fileUuid, long offset, byte[] data) {
return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
}
Boolean truncate(JObjectKey fileUuid, long length);
String readlink(JObjectKey uuid);
ByteString readlinkBS(JObjectKey uuid);
JObjectKey symlink(String oldpath, String newpath);
}
@ApplicationScoped
public class DhfsFileService {
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
int targetChunkAlignment;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
int maxChunkSize;
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
boolean useHashForChunks;
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
boolean allowRecursiveDelete;
@ConfigProperty(name = "dhfs.objects.ref_verification")
boolean refVerification;
@ConfigProperty(name = "dhfs.objects.write_log")
boolean writeLogging;
@Inject
Transaction curTx;
@Inject
RemoteTransaction remoteTx;
@Inject
TransactionManager jObjectTxManager;
@Inject
JKleppmannTreeManager jKleppmannTreeManager;
@Inject
JMapHelper jMapHelper;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC, () -> new JKleppmannTreeNodeMetaDirectory(""));
}
private ChunkData createChunk(ByteString bytes) {
var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
remoteTx.putDataNew(newChunk);
return newChunk;
}
void init(@Observes @Priority(500) StartupEvent event) {
Log.info("Initializing file service");
getTreeW();
}
private JKleppmannTreeNode getDirEntryW(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private JKleppmannTreeNode getDirEntryR(String name) {
var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) return Optional.empty();
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
return ret;
}
public Optional<GetattrRes> getattr(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var ref = curTx.get(JData.class, uuid).orElse(null);
if (ref == null) return Optional.empty();
GetattrRes ret;
if (ref instanceof RemoteObjectMeta r) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? GetattrType.SYMLINK : GetattrType.FILE);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
}
} else if (ref instanceof JKleppmannTreeNodeHolder) {
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
}
return Optional.of(ret);
});
}
public Optional<JObjectKey> open(String name) {
return jObjectTxManager.executeTx(() -> {
try {
var ret = getDirEntryR(name);
return switch (ret.meta()) {
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
default -> Optional.empty();
};
} catch (StatusRuntimeException e) {
if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
return Optional.empty();
}
throw e;
}
});
}
private void ensureDir(JKleppmannTreeNode entry) {
if (!(entry.meta() instanceof JKleppmannTreeNodeMetaDirectory))
throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
}
public Optional<JObjectKey> create(String name, long mode) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String fname = path.getFileName().toString();
var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);
File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false);
remoteTx.putData(f);
try {
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
} catch (Exception e) {
// fobj.getMeta().removeRef(newNodeId);
throw e;
}
return Optional.of(f.key());
});
}
//FIXME: Slow..
public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
return jObjectTxManager.executeTx(() -> {
return getTreeW().findParent(w -> {
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
return f.fileIno().equals(ino);
return false;
});
});
}
public void mkdir(String name, long mode) {
jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String dname = path.getFileName().toString();
Log.debug("Creating directory " + name);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
});
}
public void unlink(String name) {
jObjectTxManager.executeTx(() -> {
var node = getDirEntryOpt(name).orElse(null);
if (node == null)
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
if (!allowRecursiveDelete && !node.children().isEmpty())
throw new DirectoryNotEmptyException();
}
getTreeW().trash(node.meta(), node.key());
});
}
public Boolean rename(String from, String to) {
return jObjectTxManager.executeTx(() -> {
var node = getDirEntryW(from);
JKleppmannTreeNodeMeta meta = node.meta();
var toPath = Path.of(to);
var toDentry = getDirEntryW(toPath.getParent().toString());
ensureDir(toDentry);
getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
return true;
});
}
public Boolean chmod(JObjectKey uuid, long mode) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withMode(mode).withCurrentMTime());
return true;
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
});
}
public Iterable<String> readDir(String name) {
return jObjectTxManager.executeTx(() -> {
var found = getDirEntryW(name);
if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
return found.children().keySet();
});
}
public ByteString read(JObjectKey fileUuid, long offset, int length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
}
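// Chunks are keyed by their start offset in the file's JMap; IteratorStart.LE positions
// the iterator at the last chunk that begins at or before the requested offset.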
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
if (!it.hasNext())
return ByteString.empty();
// if (it.peekNextKey().key() != offset) {
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
// return Optional.of(ByteString.empty());
// }
long curPos = offset;
ByteString buf = ByteString.empty();
var chunk = it.next();
while (curPos < offset + length) {
var chunkPos = chunk.getKey().key();
long offInChunk = curPos - chunkPos;
long toReadInChunk = (offset + length) - curPos;
var chunkBytes = readChunk(chunk.getValue().ref());
long readableLen = chunkBytes.size() - offInChunk;
var toReadReally = Math.min(readableLen, toReadInChunk);
if (toReadReally < 0) break;
buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));
curPos += toReadReally;
if (readableLen > toReadInChunk)
break;
if (!it.hasNext()) break;
chunk = it.next();
}
return buf;
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
}
});
}
private ByteString readChunk(JObjectKey uuid) {
var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);
if (chunkRead == null) {
Log.error("Chunk requested not found: " + uuid);
throw new StatusRuntimeException(Status.NOT_FOUND);
}
return chunkRead.data();
}
private int getChunkSize(JObjectKey uuid) {
return readChunk(uuid).size();
}
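// alignDown clears the low n bits of num: -(1L << n) is a mask of ones from bit n up,
// e.g. alignDown(600000, 19) == 524288, the 512 KiB (2^19) boundary below 600000.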
private long alignDown(long num, long n) {
return num & -(1L << n);
}
public Long write(JObjectKey fileUuid, long offset, ByteString data) {
return jObjectTxManager.executeTx(() -> {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
if (file == null) {
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
}
Map<Long, JObjectKey> removedChunks = new HashMap<>();
long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
long writeEnd = offset + data.size();
long start = realOffset;
long existingEnd = 0;
ByteString pendingPrefix = ByteString.empty();
ByteString pendingSuffix = ByteString.empty();
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
while (it.hasNext()) {
var curEntry = it.next();
long curChunkStart = curEntry.getKey().key();
var curChunkId = curEntry.getValue().ref();
long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
existingEnd = curChunkEnd;
if (curChunkEnd <= realOffset) break;
removedChunks.put(curEntry.getKey().key(), curChunkId);
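// A chunk overlapping the write is replaced wholesale: its bytes before the write
// start are kept as pendingPrefix and those past the write end as pendingSuffix,
// to be rewritten into the new chunks below.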
if (curChunkStart < offset) {
if (curChunkStart < start)
start = curChunkStart;
var readChunk = readChunk(curChunkId);
pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
}
if (curChunkEnd > writeEnd) {
var readChunk = readChunk(curChunkId);
pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
}
if (curChunkEnd >= writeEnd) break;
}
}
Map<Long, JObjectKey> newChunks = new HashMap<>();
if (existingEnd < offset) {
if (!pendingPrefix.isEmpty()) {
int diff = Math.toIntExact(offset - existingEnd);
pendingPrefix = pendingPrefix.concat(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(diff)));
} else {
fillZeros(existingEnd, offset, newChunks);
start = offset;
}
}
ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);
int combinedSize = pendingWrites.size();
{
int cur = 0;
while (cur < combinedSize) {
int end;
if (combinedSize - cur < maxChunkSize)
end = combinedSize;
else if (targetChunkAlignment < 0)
end = combinedSize;
else
end = Math.min(cur + targetChunkSize, combinedSize);
var thisChunk = pendingWrites.substring(cur, end);
ChunkData newChunkData = createChunk(thisChunk);
newChunks.put(start, newChunkData.key());
start += thisChunk.size();
cur = end;
}
}
for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file.withCurrentMTime());
return (long) data.size();
});
}
public Boolean truncate(JObjectKey fileUuid, long length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to write: " + fileUuid);
return false;
}
if (length == 0) {
jMapHelper.deleteAll(file);
remoteTx.putData(file);
return true;
}
var curSize = size(fileUuid);
if (curSize == length) return true;
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
if (curSize < length) {
fillZeros(curSize, length, newChunks);
} else {
// Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(length))) {
last = it.hasNext() ? it.next() : null;
while (it.hasNext()) {
var next = it.next();
removedChunks.put(next.getKey().key(), next.getValue().ref());
}
}
removedChunks.put(last.getKey().key(), last.getValue().ref());
//
// NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
//
// long start = 0;
//
// try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
// first = it.hasNext() ? it.next() : null;
// boolean empty = last == null;
// if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
// first = null;
// last = null;
// start = offset;
// } else if (!empty) {
// assert first != null;
// removedChunks.put(first.getKey().key(), first.getValue().ref());
// while (it.hasNext() && it.peekNextKey() != last.getKey()) {
// var next = it.next();
// removedChunks.put(next.getKey().key(), next.getValue().ref());
// }
// removedChunks.put(last.getKey().key(), last.getValue().ref());
// }
// }
//
// var tail = chunksAll.lowerEntry(length);
// var afterTail = chunksAll.tailMap(tail.getKey(), false);
//
// removedChunks.put(tail.getKey(), tail.getValue());
// removedChunks.putAll(afterTail);
var tailBytes = readChunk(last.getValue().ref());
var newChunk = tailBytes.substring(0, (int) (length - last.getKey().key()));
ChunkData newChunkData = createChunk(newChunk);
newChunks.put(last.getKey().key(), newChunkData.key());
}
// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());
for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file.withCurrentMTime());
return true;
});
}
private void fillZeros(long fillStart, long length, Map<Long, JObjectKey> newChunks) {
long combinedSize = (length - fillStart);
long start = fillStart;
// Hack
HashMap<Long, ChunkData> zeroCache = new HashMap<>();
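// All-zero chunks of equal size are interchangeable, so one ChunkData per size is
// cached and its key reused for every hole of that size instead of allocating anew.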
{
long cur = 0;
while (cur < combinedSize) {
long end;
if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
end = cur + targetChunkSize;
} else {
end = combinedSize;
}
}
if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(Math.toIntExact(end - cur)))));
ChunkData newChunkData = zeroCache.get(end - cur);
newChunks.put(start, newChunkData.key());
start += newChunkData.data().size();
cur = end;
}
}
}
public String readlink(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
return readlinkBS(uuid).toStringUtf8();
});
}
public ByteString readlinkBS(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid)));
});
}
public JObjectKey symlink(String oldpath, String newpath) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(newpath);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String fname = path.getFileName().toString();
var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);
ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true);
jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());
remoteTx.putData(f);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
return f.key();
});
}
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
// FIXME:
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
return true;
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
});
}
public long size(JObjectKey fileUuid) {
return jObjectTxManager.executeTx(() -> {
long realSize = 0;
var file = remoteTx.getData(File.class, fileUuid)
.orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) {
last = it.hasNext() ? it.next() : null;
}
if (last != null) {
realSize = last.getKey().key() + getChunkSize(last.getValue().ref());
}
return realSize;
});
}
public Long write(JObjectKey fileUuid, long offset, byte[] data) {
return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
}
}
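A hypothetical caller-side sketch of the service above (class name, path, and data are invented; the real caller is the FUSE layer):

// Hypothetical usage sketch of DhfsFileService (sample path and data invented).
@ApplicationScoped
class FileServiceExampleClient {
    @Inject
    DhfsFileService fileService;

    void roundTrip() {
        fileService.mkdir("/notes", 0755);
        JObjectKey ino = fileService.create("/notes/hello.txt", 0644).orElseThrow();
        fileService.write(ino, 0, "hello world".getBytes(StandardCharsets.UTF_8));
        ByteString back = fileService.read(ino, 0, (int) fileService.size(ino));
        assert back.toStringUtf8().equals("hello world");
    }
}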

View File

@@ -1,667 +0,0 @@
package com.usatiuk.dhfsfs.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.ChunkData;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.*;
import java.util.stream.StreamSupport;
@ApplicationScoped
public class DhfsFileServiceImpl implements DhfsFileService {
@Inject
Transaction curTx;
@Inject
RemoteTransaction remoteTx;
@Inject
TransactionManager jObjectTxManager;
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
int targetChunkAlignment;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
boolean useHashForChunks;
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
boolean allowRecursiveDelete;
@ConfigProperty(name = "dhfs.objects.ref_verification")
boolean refVerification;
@ConfigProperty(name = "dhfs.objects.write_log")
boolean writeLogging;
@Inject
JKleppmannTreeManager jKleppmannTreeManager;
@Inject
JMapHelper jMapHelper;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC, () -> new JKleppmannTreeNodeMetaDirectory(""));
}
private ChunkData createChunk(ByteString bytes) {
var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
remoteTx.putData(newChunk);
return newChunk;
}
void init(@Observes @Priority(500) StartupEvent event) {
Log.info("Initializing file service");
getTreeW();
}
private JKleppmannTreeNode getDirEntryW(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private JKleppmannTreeNode getDirEntryR(String name) {
var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) return Optional.empty();
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
return ret;
}
@Override
public Optional<GetattrRes> getattr(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var ref = curTx.get(JData.class, uuid).orElse(null);
if (ref == null) return Optional.empty();
GetattrRes ret;
if (ref instanceof RemoteObjectMeta r) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? GetattrType.SYMLINK : GetattrType.FILE);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Remote object is not a File: " + ref.key()));
}
} else if (ref instanceof JKleppmannTreeNodeHolder) {
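// Tree-node directories carry no stored attributes; report fixed placeholder times and mode 0700.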
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Not a file or directory: " + ref.key()));
}
return Optional.of(ret);
});
}
@Override
public Optional<JObjectKey> open(String name) {
return jObjectTxManager.executeTx(() -> {
try {
var ret = getDirEntryR(name);
return switch (ret.meta()) {
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
default -> Optional.empty();
};
} catch (StatusRuntimeException e) {
if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
return Optional.empty();
}
throw e;
}
});
}
private void ensureDir(JKleppmannTreeNode entry) {
if (!(entry.meta() instanceof JKleppmannTreeNodeMetaDirectory))
throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
}
@Override
public Optional<JObjectKey> create(String name, long mode) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String fname = path.getFileName().toString();
var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);
File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false);
remoteTx.putData(f);
try {
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
} catch (Exception e) {
// fobj.getMeta().removeRef(newNodeId);
throw e;
}
return Optional.of(f.key());
});
}
//FIXME: Slow..
@Override
public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
return jObjectTxManager.executeTx(() -> {
return getTreeW().findParent(w -> {
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
return f.fileIno().equals(ino);
return false;
});
});
}
@Override
public void mkdir(String name, long mode) {
jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String dname = path.getFileName().toString();
Log.debug("Creating directory " + name);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
});
}
@Override
public void unlink(String name) {
jObjectTxManager.executeTx(() -> {
var node = getDirEntryOpt(name).orElse(null);
if (node == null)
throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory) {
if (!allowRecursiveDelete && !node.children().isEmpty())
throw new DirectoryNotEmptyException();
}
getTreeW().trash(node.meta(), node.key());
});
}
@Override
public Boolean rename(String from, String to) {
return jObjectTxManager.executeTx(() -> {
var node = getDirEntryW(from);
JKleppmannTreeNodeMeta meta = node.meta();
var toPath = Path.of(to);
var toDentry = getDirEntryW(toPath.getParent().toString());
ensureDir(toDentry);
getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
return true;
});
}
@Override
public Boolean chmod(JObjectKey uuid, long mode) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withMode(mode).withCurrentMTime());
return true;
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
});
}
@Override
public Iterable<String> readDir(String name) {
return jObjectTxManager.executeTx(() -> {
var found = getDirEntryW(name);
if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
return found.children().keySet();
});
}
@Override
public ByteString read(JObjectKey fileUuid, long offset, int length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should not be negative: " + length));
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should not be negative: " + offset));
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
}
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
if (!it.hasNext())
return ByteString.empty();
// if (it.peekNextKey().key() != offset) {
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
// return Optional.of(ByteString.empty());
// }
long curPos = offset;
ByteString buf = ByteString.empty();
var chunk = it.next();
while (curPos < offset + length) {
var chunkPos = chunk.getKey().key();
long offInChunk = curPos - chunkPos;
long toReadInChunk = (offset + length) - curPos;
var chunkBytes = readChunk(chunk.getValue().ref());
long readableLen = chunkBytes.size() - offInChunk;
var toReadReally = Math.min(readableLen, toReadInChunk);
if (toReadReally < 0) break;
buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));
curPos += toReadReally;
if (readableLen > toReadInChunk)
break;
if (!it.hasNext()) break;
chunk = it.next();
}
return buf;
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
}
});
}
private ByteString readChunk(JObjectKey uuid) {
var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);
if (chunkRead == null) {
Log.error("Chunk requested not found: " + uuid);
throw new StatusRuntimeException(Status.NOT_FOUND);
}
return chunkRead.data();
}
private int getChunkSize(JObjectKey uuid) {
return readChunk(uuid).size();
}
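// n is a log2 exponent: clearing the low n bits rounds num down to a multiple of 2^n.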
private long alignDown(long num, long n) {
return num & -(1L << n);
}
@Override
public Long write(JObjectKey fileUuid, long offset, ByteString data) {
return jObjectTxManager.executeTx(() -> {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should not be negative: " + offset));
var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
if (file == null) {
Log.error("File not found when trying to write: " + fileUuid);
return -1L;
}
if (writeLogging) {
Log.info("Writing to file: " + file.key() + " size=" + size(fileUuid) + " "
+ offset + " " + data.size());
}
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
long writeEnd = offset + data.size();
long start = realOffset;
long existingEnd = 0;
ByteString pendingPrefix = ByteString.empty();
ByteString pendingSuffix = ByteString.empty();
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
while (it.hasNext()) {
var curEntry = it.next();
long curChunkStart = curEntry.getKey().key();
var curChunkId = curEntry.getValue().ref();
long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
existingEnd = curChunkEnd;
if (curChunkEnd <= realOffset) break;
removedChunks.put(curEntry.getKey().key(), curChunkId);
if (curChunkStart < offset) {
if (curChunkStart < start)
start = curChunkStart;
var readChunk = readChunk(curChunkId);
pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
}
if (curChunkEnd > writeEnd) {
var readChunk = readChunk(curChunkId);
pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
}
if (curChunkEnd >= writeEnd) break;
}
}
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
if (existingEnd < offset) {
if (!pendingPrefix.isEmpty()) {
int diff = Math.toIntExact(offset - existingEnd);
pendingPrefix = pendingPrefix.concat(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(diff)));
} else {
fillZeros(existingEnd, offset, newChunks);
start = offset;
}
}
ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);
int combinedSize = pendingWrites.size();
{
int targetChunkSize = 1 << targetChunkAlignment;
int cur = 0;
while (cur < combinedSize) {
int end;
if (targetChunkAlignment < 0)
end = combinedSize;
else
end = Math.min(cur + targetChunkSize, combinedSize);
var thisChunk = pendingWrites.substring(cur, end);
ChunkData newChunkData = createChunk(thisChunk);
newChunks.put(start, newChunkData.key());
start += thisChunk.size();
cur = end;
}
}
for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file.withCurrentMTime());
return (long) data.size();
});
}
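To make the loop above concrete: once the aligned start is known and the prefix, new data, and suffix are concatenated, the combined range is simply cut into alignment-sized pieces. A self-contained sketch of just that splitting step (hypothetical helper, assuming targetChunkAlignment >= 0; the real code slices a ByteString and creates a ChunkData per piece):

import java.util.NavigableMap;
import java.util.TreeMap;

class ChunkSplitSketch {
    // Cut [start, start + len) into pieces of at most 2^alignExp bytes.
    static NavigableMap<Long, Integer> split(long start, int len, int alignExp) {
        NavigableMap<Long, Integer> chunks = new TreeMap<>(); // file offset -> chunk length
        int chunkSize = 1 << alignExp;
        int cur = 0;
        while (cur < len) {
            int end = Math.min(cur + chunkSize, len);
            chunks.put(start + cur, end - cur);
            cur = end;
        }
        return chunks;
    }

    public static void main(String[] args) {
        // A 300000-byte combined write whose aligned start is 262144, with 2^17-byte chunks:
        System.out.println(split(262_144, 300_000, 17));
        // {262144=131072, 393216=131072, 524288=37856}
    }
}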
@Override
public Boolean truncate(JObjectKey fileUuid, long length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should not be negative: " + length));
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to write: " + fileUuid);
return false;
}
if (length == 0) {
jMapHelper.deleteAll(file);
remoteTx.putData(file.withCurrentMTime());
return true;
}
var curSize = size(fileUuid);
if (curSize == length) return true;
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
if (curSize < length) {
fillZeros(curSize, length, newChunks);
} else {
// Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(length))) {
last = it.hasNext() ? it.next() : null;
while (it.hasNext()) {
var next = it.next();
removedChunks.put(next.getKey().key(), next.getValue().ref());
}
}
removedChunks.put(last.getKey().key(), last.getValue().ref());
//
// NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
//
// long start = 0;
//
// try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
// first = it.hasNext() ? it.next() : null;
// boolean empty = last == null;
// if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
// first = null;
// last = null;
// start = offset;
// } else if (!empty) {
// assert first != null;
// removedChunks.put(first.getKey().key(), first.getValue().ref());
// while (it.hasNext() && it.peekNextKey() != last.getKey()) {
// var next = it.next();
// removedChunks.put(next.getKey().key(), next.getValue().ref());
// }
// removedChunks.put(last.getKey().key(), last.getValue().ref());
// }
// }
//
// var tail = chunksAll.lowerEntry(length);
// var afterTail = chunksAll.tailMap(tail.getKey(), false);
//
// removedChunks.put(tail.getKey(), tail.getValue());
// removedChunks.putAll(afterTail);
var tailBytes = readChunk(last.getValue().ref());
var newChunk = tailBytes.substring(0, (int) (length - last.getKey().key()));
ChunkData newChunkData = createChunk(newChunk);
newChunks.put(last.getKey().key(), newChunkData.key());
}
// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());
for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file.withCurrentMTime());
return true;
});
}
private void fillZeros(long fillStart, long length, NavigableMap<Long, JObjectKey> newChunks) {
long combinedSize = (length - fillStart);
long start = fillStart;
// Hack
HashMap<Long, ChunkData> zeroCache = new HashMap<>();
{
long cur = 0;
while (cur < combinedSize) {
long end;
if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
end = cur + targetChunkSize;
} else {
end = combinedSize;
}
}
if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(Math.toIntExact(end - cur)))));
ChunkData newChunkData = zeroCache.get(end - cur);
newChunks.put(start, newChunkData.key());
start += newChunkData.data().size();
cur = end;
}
}
}
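One detail worth noting in fillZeros above: zeroCache keeps a single ChunkData per distinct chunk length, so a large hole is filled by referencing one shared zero chunk many times instead of allocating a fresh buffer per piece. A minimal standalone sketch of that dedup pattern (illustrative types only, not the project's API):

import java.util.HashMap;
import java.util.Map;

class ZeroChunkCache {
    private final Map<Long, byte[]> zeroCache = new HashMap<>();

    // One shared zero-filled buffer per distinct length: filling a hole with
    // equally-sized chunks allocates the backing bytes only once.
    byte[] zerosOfSize(long size) {
        return zeroCache.computeIfAbsent(size, n -> new byte[Math.toIntExact(n)]);
    }
}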
@Override
public String readlink(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
return readlinkBS(uuid).toStringUtf8();
});
}
@Override
public ByteString readlinkBS(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid)));
});
}
@Override
public JObjectKey symlink(String oldpath, String newpath) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(newpath);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String fname = path.getFileName().toString();
var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);
ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true);
jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());
remoteTx.putData(f);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
return f.key();
});
}
@Override
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
// FIXME:
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
return true;
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
});
}
@Override
public long size(JObjectKey fileUuid) {
return jObjectTxManager.executeTx(() -> {
long realSize = 0;
var file = remoteTx.getData(File.class, fileUuid)
.orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) {
last = it.hasNext() ? it.next() : null;
}
if (last != null) {
realSize = last.getKey().key() + getChunkSize(last.getValue().ref());
}
return realSize;
});
}
}

View File

@@ -30,7 +30,7 @@ public class TestDataCleaner {
purgeDirectory(Path.of(tempDirectory).toFile());
}
void purgeDirectory(File dir) {
public void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);

View File

@@ -139,16 +139,13 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
true
false
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>

View File

@@ -1,12 +1,15 @@
package com.usatiuk.dhfsfuse;
import com.google.protobuf.UnsafeByteOperations;
import com.kenai.jffi.MemoryIO;
import com.sun.security.auth.module.UnixSystem;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.dhfsfs.service.DirectoryNotEmptyException;
import com.usatiuk.dhfsfs.service.GetattrRes;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.utils.UninitializedByteBuffer;
import com.usatiuk.utils.UnsafeAccessor;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -17,15 +20,15 @@ import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import jnr.ffi.Pointer;
import jnr.ffi.Runtime;
import jnr.ffi.Struct;
import jnr.ffi.types.off_t;
import org.apache.commons.lang3.SystemUtils;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import ru.serce.jnrfuse.ErrorCodes;
import ru.serce.jnrfuse.FuseFillDir;
import ru.serce.jnrfuse.FuseStubFS;
import ru.serce.jnrfuse.struct.FileStat;
import ru.serce.jnrfuse.struct.FuseFileInfo;
import ru.serce.jnrfuse.struct.Statvfs;
import ru.serce.jnrfuse.struct.Timespec;
import ru.serce.jnrfuse.struct.*;
import java.nio.ByteBuffer;
import java.nio.file.Paths;
@@ -51,8 +54,6 @@ public class DhfsFuse extends FuseStubFS {
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@Inject
JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors;
@Inject
DhfsFileService fileService;
private long allocateHandle(JObjectKey key) {
@@ -231,7 +232,7 @@ public class DhfsFuse extends FuseStubFS {
var fileKey = getFromHandle(fi.fh.get());
var read = fileService.read(fileKey, offset, (int) size);
if (read.isEmpty()) return 0;
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
return read.size();
} catch (Throwable e) {
Log.error("When reading " + path, e);
@@ -241,24 +242,22 @@ public class DhfsFuse extends FuseStubFS {
@Override
public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
var buffer = UninitializedByteBuffer.allocate((int) size);
UnsafeAccessor.UNSAFE.copyMemory(
buf.address(),
UnsafeAccessor.NIO.getBufferAddress(buffer),
size
);
return write(path, buffer, offset, fi);
}
public int write(String path, ByteBuffer buffer, long offset, FuseFileInfo fi) {
if (offset < 0) return -ErrorCodes.EINVAL();
try {
var fileKey = getFromHandle(fi.fh.get());
var buffer = ByteBuffer.allocateDirect((int) size);
if (buffer.isDirect()) {
jnrPtrByteOutputAccessors.getUnsafe().copyMemory(
buf.address(),
jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer),
size
);
} else {
buf.get(0, buffer.array(), 0, (int) size);
}
var written = fileService.write(fileKey, offset, UnsafeByteOperations.unsafeWrap(buffer));
return written.intValue();
} catch (Throwable e) {
} catch (Exception e) {
Log.error("When writing " + path, e);
return -ErrorCodes.EIO();
}
@@ -394,7 +393,7 @@ public class DhfsFuse extends FuseStubFS {
var file = fileOpt.get();
var read = fileService.readlinkBS(fileOpt.get());
if (read.isEmpty()) return 0;
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
buf.putByte(Math.min(size - 1, read.size()), (byte) 0);
return 0;
} catch (Throwable e) {
@@ -426,4 +425,29 @@ public class DhfsFuse extends FuseStubFS {
return -ErrorCodes.EIO();
}
}
@Override
public int write_buf(String path, FuseBufvec buf, @off_t long off, FuseFileInfo fi) {
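// Drain the kernel's buffer vector with a single fuse_buf_copy into one direct ByteBuffer,
// then reuse the ByteBuffer write path above.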
int size = (int) libFuse.fuse_buf_size(buf);
FuseBufvec tmpVec = new FuseBufvec(Runtime.getSystemRuntime());
long tmpVecAddr = MemoryIO.getInstance().allocateMemory(Struct.size(tmpVec), false);
try {
tmpVec.useMemory(Pointer.wrap(Runtime.getSystemRuntime(), tmpVecAddr));
FuseBufvec.init(tmpVec, size);
var bb = UninitializedByteBuffer.allocate(size);
var mem = UninitializedByteBuffer.getAddress(bb);
tmpVec.buf.mem.set(mem);
tmpVec.buf.size.set(size);
int res = (int) libFuse.fuse_buf_copy(tmpVec, buf, 0);
if (res != size) {
Log.errorv("fuse_buf_copy failed: {0} != {1}", res, size);
return -ErrorCodes.ENOMEM();
}
return write(path, bb, off, fi);
} finally {
if (tmpVecAddr != 0) {
MemoryIO.getInstance().freeMemory(tmpVecAddr);
}
}
}
}

View File

@@ -1,6 +1,7 @@
package com.usatiuk.dhfsfuse;
import com.google.protobuf.ByteOutput;
import com.usatiuk.utils.UnsafeAccessor;
import jnr.ffi.Pointer;
import java.nio.ByteBuffer;
@@ -9,14 +10,12 @@ import java.nio.MappedByteBuffer;
public class JnrPtrByteOutput extends ByteOutput {
private final Pointer _backing;
private final long _size;
private final JnrPtrByteOutputAccessors _accessors;
private long _pos;
public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) {
public JnrPtrByteOutput(Pointer backing, long size) {
_backing = backing;
_size = size;
_pos = 0;
_accessors = accessors;
}
@Override
@@ -47,9 +46,9 @@ public class JnrPtrByteOutput extends ByteOutput {
if (value instanceof MappedByteBuffer mb) {
mb.load();
}
long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position();
long addr = UnsafeAccessor.NIO.getBufferAddress(value) + value.position();
var out = _backing.address() + _pos;
_accessors.getUnsafe().copyMemory(addr, out, rem);
UnsafeAccessor.UNSAFE.copyMemory(addr, out, rem);
} else {
_backing.put(_pos, value.array(), value.arrayOffset() + value.position(), rem);
}

View File

@@ -1,29 +0,0 @@
package com.usatiuk.dhfsfuse;
import jakarta.inject.Singleton;
import jdk.internal.access.JavaNioAccess;
import jdk.internal.access.SharedSecrets;
import sun.misc.Unsafe;
import java.lang.reflect.Field;
@Singleton
class JnrPtrByteOutputAccessors {
JavaNioAccess _nioAccess;
Unsafe _unsafe;
JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException {
_nioAccess = SharedSecrets.getJavaNioAccess();
Field f = Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
_unsafe = (Unsafe) f.get(null);
}
public JavaNioAccess getNioAccess() {
return _nioAccess;
}
public Unsafe getUnsafe() {
return _unsafe;
}
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp;
package com.usatiuk.dhfsfuse;
import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;

View File

@@ -14,8 +14,9 @@ dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.files.target_chunk_size=524288
dhfs.files.max_chunk_size=524288
dhfs.files.target_chunk_alignment=17
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
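For scale: the write path shown earlier in this diff derives its chunk size as 1 << targetChunkAlignment, so target_chunk_alignment is a log2 exponent. The new defaults therefore mean 1 << 17 == 131072-byte (128 KiB) write chunks, down from 1 << 19 == 524288, while target_chunk_size=524288 (512 KiB) still bounds the zero-fill chunks created by fillZeros. max_chunk_size is introduced here; its consumer is not visible in this diff.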

View File

@@ -30,7 +30,7 @@ public class TestDataCleaner {
purgeDirectory(Path.of(tempDirectory).toFile());
}
void purgeDirectory(File dir) {
public static void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -168,35 +168,6 @@ public class DhfsFuseIT {
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
// TODO: How does this fit in with the tree?
@Test
@Disabled
void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1);
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}
@Test
void deleteTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
@@ -221,6 +192,28 @@ public class DhfsFuseIT {
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}
@Test
void deleteTestKickedOut() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("kicked"), 60, TimeUnit.SECONDS, 1);
Log.info("Deleting");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
Log.info("Deleted");
// FIXME?
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
await().atMost(45, TimeUnit.SECONDS).until(() ->
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}
@Test
void moveFileTest() throws IOException, InterruptedException, TimeoutException {
Log.info("Creating");

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;
import io.quarkus.logging.Log;
import org.jetbrains.annotations.NotNull;
@@ -70,6 +70,7 @@ public class DhfsImage implements Future<String> {
"--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED",
"--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED",
"--add-opens=java.base/java.nio=ALL-UNNAMED",
"--enable-preview",
"-Ddhfs.objects.peerdiscovery.interval=1s",
"-Ddhfs.objects.invalidation.delay=100",
"-Ddhfs.objects.deletion.delay=0",
@@ -78,6 +79,8 @@ public class DhfsImage implements Future<String> {
"-Ddhfs.objects.sync.timeout=30",
"-Ddhfs.objects.sync.ping.timeout=5",
"-Ddhfs.objects.reconnect_interval=1s",
"-Ddhfs.objects.last-seen.timeout=30",
"-Ddhfs.objects.last-seen.update=10",
"-Ddhfs.sync.cert-check=false",
"-Dquarkus.log.category.\"com.usatiuk\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE",

View File

@@ -1,7 +1,7 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsapp.TestDataCleaner;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;
import io.quarkus.logging.Log;

View File

@@ -1,7 +1,7 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsapp.TestDataCleaner;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import org.junit.jupiter.api.*;

View File

@@ -1,5 +1,7 @@
package com.usatiuk.objects;
import com.usatiuk.utils.UninitializedByteBuffer;
import java.io.Serial;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
@@ -46,7 +48,7 @@ public final class JObjectKeyImpl implements JObjectKey {
synchronized (this) {
if (_bb != null) return _bb;
var bytes = value.getBytes(StandardCharsets.ISO_8859_1);
var directBb = ByteBuffer.allocateDirect(bytes.length);
var directBb = UninitializedByteBuffer.allocate(bytes.length);
directBb.put(bytes);
directBb.flip();
_bb = directBb;

View File

@@ -1,6 +0,0 @@
package com.usatiuk.objects.iterators;
@FunctionalInterface
public interface IterProdFn<K extends Comparable<K>, V> {
CloseableKvIterator<K, V> get(IteratorStart start, K key);
}

View File

@@ -11,17 +11,15 @@ import java.util.TreeMap;
public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final NavigableMap<K, IteratorEntry<K, V>> _sortedIterators = new TreeMap<>();
private final String _name;
private final List<IteratorEntry<K, V>> _iterators;
public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
_goingForward = true;
_name = name;
// Why streams are so slow?
public MergingKvIterator(IteratorStart startType, K startKey, List<CloseableKvIterator<K, V>> iterators) {
_goingForward = true;
{
IteratorEntry<K, V>[] iteratorEntries = new IteratorEntry[iterators.size()];
for (int i = 0; i < iterators.size(); i++) {
iteratorEntries[i] = new IteratorEntry<>(i, iterators.get(i).get(startType, startKey));
iteratorEntries[i] = new IteratorEntry<>(i, iterators.get(i));
}
_iterators = List.of(iteratorEntries);
}
@@ -91,8 +89,8 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
}
@SafeVarargs
public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
this(name, startType, startKey, List.of(iterators));
public MergingKvIterator(IteratorStart startType, K startKey, CloseableKvIterator<K, V>... iterators) {
this(startType, startKey, List.of(iterators));
}
private void advanceIterator(IteratorEntry<K, V> iteratorEntry) {
@@ -151,7 +149,6 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|| (!_goingForward && peekImpl().compareTo(cur.getKey()) >= 0))) {
skipImpl();
}
Log.tracev("{0} Reversed to {1}", _name, _sortedIterators);
}
@Override
@@ -199,28 +196,14 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
@Override
public String toString() {
return "MergingKvIterator{" +
"_name='" + _name + '\'' +
", _sortedIterators=" + _sortedIterators.keySet() +
", _iterators=" + _iterators +
'}';
}
private interface FirstMatchState<K extends Comparable<K>, V> {
}
private record IteratorEntry<K extends Comparable<K>, V>(int priority, CloseableKvIterator<K, V> iterator) {
public IteratorEntry<K, V> reversed() {
return new IteratorEntry<>(priority, iterator.reversed());
}
}
private record FirstMatchNone<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
}
private record FirstMatchFound<K extends Comparable<K>, V>(
CloseableKvIterator<K, V> iterator) implements FirstMatchState<K, V> {
}
private record FirstMatchConsumed<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
}
}
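For orientation, a minimal usage sketch of the new constructor shape, wiring in two NavigableMapKvIterators the way the stores later in this diff do. The data is hypothetical, and we assume (as the overlay-before-backing ordering in the stores implies) that an earlier iterator in the list shadows later ones on equal keys:

NavigableMap<String, Integer> overlay = new TreeMap<>(Map.of("a", 1, "b", 2));
NavigableMap<String, Integer> base = new TreeMap<>(Map.of("b", 99, "c", 3));
List<CloseableKvIterator<String, Integer>> sources = List.of(
        new NavigableMapKvIterator<>(overlay, IteratorStart.GE, "a"),
        new NavigableMapKvIterator<>(base, IteratorStart.GE, "a"));
try (var merged = new MergingKvIterator<>(IteratorStart.GE, "a", sources)) {
    while (merged.hasNext())
        System.out.println(merged.next()); // (a,1), (b,2), (c,3) — overlay wins for "b"
}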

View File

@@ -1,22 +0,0 @@
package com.usatiuk.objects.iterators;
import java.util.List;
public abstract class TombstoneMergingKvIterator {
public static <K extends Comparable<K>, V> CloseableKvIterator<K, V> of(String name, IteratorStart startType, K startKey, List<IterProdFn<K, MaybeTombstone<V>>> iterators) {
return new PredicateKvIterator<K, MaybeTombstone<V>, V>(
new MergingKvIterator<K, MaybeTombstone<V>>(name + "-merging", startType, startKey, iterators),
startType, startKey,
pair -> {
// Log.tracev("{0} - Processing pair {1}", name, pair);
if (pair instanceof Tombstone<V>) {
return null;
}
return ((Data<V>) pair).value();
});
}
public static <K extends Comparable<K>, V> CloseableKvIterator<K, V> of(String name, IteratorStart startType, K startKey, IterProdFn<K, MaybeTombstone<V>>... iterators) {
return of(name, startType, startKey, List.of(iterators));
}
}

View File

@@ -2,26 +2,26 @@ package com.usatiuk.objects.iterators;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Function;
public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
private final CloseableKvIterator<K, V> _backing;
private final Function<V, V_T> _transformer;
private Pair<K, V_T> _next = null;
public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final MergingKvIterator<K, MaybeTombstone<V>> _backing;
private Pair<K, V> _next = null;
private boolean _checkedNext = false;
public PredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<V, V_T> transformer) {
public TombstoneSkippingIterator(IteratorStart start, K startKey, List<CloseableKvIterator<K, MaybeTombstone<V>>> iterators) {
_goingForward = true;
_backing = backing;
_transformer = transformer;
_backing = new MergingKvIterator<>(start, startKey, iterators);
if (start == IteratorStart.GE || start == IteratorStart.GT)
return;
fillNext();
boolean shouldGoBack = false;
if (canHaveNext())
tryFillNext();
if (start == IteratorStart.LE) {
if (_next == null || _next.getKey().compareTo(startKey) > 0) {
shouldGoBack = true;
@@ -38,34 +38,27 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends Revers
_backing.skipPrev();
fillNext();
_goingForward = true;
_backing.skip();
if (_next != null)
_backing.skip();
fillNext();
}
}
private boolean canHaveNext() {
return (_goingForward ? _backing.hasNext() : _backing.hasPrev());
}
// switch (start) {
// case LT -> {
//// assert _next == null || _next.getKey().compareTo(startKey) < 0;
// }
// case LE -> {
//// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
// }
// case GT -> {
// assert _next == null || _next.getKey().compareTo(startKey) > 0;
// }
// case GE -> {
// assert _next == null || _next.getKey().compareTo(startKey) >= 0;
// }
// }
private boolean tryFillNext() {
var next = _goingForward ? _backing.next() : _backing.prev();
if (next.getValue() instanceof Tombstone<?>)
return false;
_next = Pair.of(next.getKey(), ((Data<V>) next.getValue()).value());
return true;
}
private void fillNext() {
while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) {
var next = _goingForward ? _backing.next() : _backing.prev();
var transformed = _transformer.apply(next.getValue());
if (transformed == null)
continue;
_next = Pair.of(next.getKey(), transformed);
while (_next == null && canHaveNext()) {
tryFillNext();
}
_checkedNext = true;
}
@@ -80,9 +73,6 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends Revers
else if (!_goingForward && !wasAtEnd)
_backing.skipPrev();
// if (!wasAtEnd)
// Log.tracev("Skipped in reverse: {0}", _next);
_next = null;
_checkedNext = false;
}
@@ -117,7 +107,7 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends Revers
}
@Override
protected Pair<K, V_T> nextImpl() {
protected Pair<K, V> nextImpl() {
if (!_checkedNext)
fillNext();
@@ -137,7 +127,6 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends Revers
@Override
public String toString() {
return "PredicateKvIterator{" +
"_backing=" + _backing +
", _next=" + _next +
'}';
}

View File

@@ -2,13 +2,17 @@ package com.usatiuk.objects.snapshot;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MaybeTombstone;
import com.usatiuk.objects.iterators.Tombstone;
import com.usatiuk.utils.AutoCloseableNoThrow;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;
public interface Snapshot<K extends Comparable<K>, V> extends AutoCloseableNoThrow {
CloseableKvIterator<K, V> getIterator(IteratorStart start, K key);
List<CloseableKvIterator<K, MaybeTombstone<V>>> getIterator(IteratorStart start, K key);
@Nonnull
Optional<V> readObject(K name);

View File

@@ -5,6 +5,7 @@ import com.usatiuk.objects.JDataVersionedWrapperLazy;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
@@ -16,6 +17,7 @@ import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.pcollections.TreePMap;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -33,6 +35,7 @@ public class CachingObjectPersistentStore {
private ExecutorService _statusExecutor;
private AtomicLong _cached = new AtomicLong();
private AtomicLong _cacheTries = new AtomicLong();
public CachingObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.lru.limit") int sizeLimit) {
_cache = new AtomicReference<>(
new Cache(TreePMap.empty(), 0, -1, sizeLimit)
@@ -150,10 +153,12 @@ public class CachingObjectPersistentStore {
}
@Override
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
return TombstoneMergingKvIterator.<JObjectKey, JDataVersionedWrapper>of("cache", start, key,
(mS, mK) -> new NavigableMapKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>(_curCache.map(), mS, mK),
(mS, mK) -> new CachingKvIterator(_backing.getIterator(start, key)));
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
return ListUtils.prependAndMap(
new NavigableMapKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>(_curCache.map(), start, key),
_backing.getIterator(start, key),
i -> new CachingKvIterator((CloseableKvIterator<JObjectKey, JDataVersionedWrapper>) (CloseableKvIterator<JObjectKey, ?>) i)
);
}
@Nonnull

View File

@@ -3,10 +3,7 @@ package com.usatiuk.objects.stores;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyMax;
import com.usatiuk.objects.JObjectKeyMin;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.KeyPredicateKvIterator;
import com.usatiuk.objects.iterators.ReversibleKvIterator;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import io.quarkus.arc.properties.IfBuildProperty;
import io.quarkus.logging.Log;
@@ -26,8 +23,10 @@ import java.lang.ref.Cleaner;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.stream.Stream;
import static org.lmdbjava.DbiFlags.MDB_CREATE;
import static org.lmdbjava.Env.create;
@@ -112,9 +111,9 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
private boolean _closed = false;
@Override
public CloseableKvIterator<JObjectKey, ByteBuffer> getIterator(IteratorStart start, JObjectKey key) {
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>>> getIterator(IteratorStart start, JObjectKey key) {
assert !_closed;
return new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn, start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR));
return List.of(new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn, start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR)));
}
@Nonnull
@@ -195,7 +194,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
return _root.toFile().getUsableSpace();
}
private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, ByteBuffer> {
private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>> {
private static final Cleaner CLEANER = Cleaner.create();
private final Txn<ByteBuffer> _txn; // Managed by the snapshot
private final Cursor<ByteBuffer> _cursor;
@@ -350,13 +349,13 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
@Override
protected Pair<JObjectKey, ByteBuffer> nextImpl() {
protected Pair<JObjectKey, MaybeTombstone<ByteBuffer>> nextImpl() {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
// TODO: Right now with java serialization it doesn't matter, it's all copied to arrays anyway
var val = _cursor.val();
var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), val.asReadOnlyBuffer());
Pair<JObjectKey, MaybeTombstone<ByteBuffer>> ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), new DataWrapper<>(val.asReadOnlyBuffer()));
if (_goingForward)
_hasNext = _cursor.next();
else

View File

@@ -2,10 +2,7 @@ package com.usatiuk.objects.stores;
import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MappingKvIterator;
import com.usatiuk.objects.iterators.NavigableMapKvIterator;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import io.quarkus.arc.properties.IfBuildProperty;
import jakarta.enterprise.context.ApplicationScoped;
@@ -13,8 +10,10 @@ import org.pcollections.TreePMap;
import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Stream;
@ApplicationScoped
@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory")
@@ -31,8 +30,8 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
private final long _lastCommitId = MemoryObjectPersistentStore.this._lastCommitId;
@Override
public CloseableKvIterator<JObjectKey, ByteBuffer> getIterator(IteratorStart start, JObjectKey key) {
return new MappingKvIterator<>(new NavigableMapKvIterator<>(_objects, start, key), ByteString::asReadOnlyByteBuffer);
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>>> getIterator(IteratorStart start, JObjectKey key) {
return List.of(new MappingKvIterator<>(new NavigableMapKvIterator<>(_objects, start, key), s -> new DataWrapper<>(s.asReadOnlyByteBuffer())));
}
@Nonnull

View File

@@ -1,21 +1,20 @@
package com.usatiuk.objects.stores;
import com.google.protobuf.ByteString;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JDataVersionedWrapperSerializer;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.ObjectSerializer;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MappingKvIterator;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.utils.ListUtils;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;
@ApplicationScoped
public class SerializingObjectPersistentStore {
@@ -30,8 +29,10 @@ public class SerializingObjectPersistentStore {
private final Snapshot<JObjectKey, ByteBuffer> _backing = delegateStore.getSnapshot();
@Override
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
return new MappingKvIterator<>(_backing.getIterator(start, key), d -> serializer.deserialize(d));
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
return ListUtils.map(_backing.getIterator(start, key),
i -> new MappingKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>, MaybeTombstone<JDataVersionedWrapper>>(i,
d -> serializer.deserialize(((DataWrapper<ByteBuffer>) d).value())));
}
@Nonnull

View File

@@ -3,10 +3,13 @@ package com.usatiuk.objects.stores;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JDataVersionedWrapperImpl;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MaybeTombstone;
import com.usatiuk.objects.iterators.NavigableMapKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.transaction.TxCommitException;
import com.usatiuk.objects.transaction.TxRecord;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
@@ -26,24 +29,38 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
@ApplicationScoped
public class WritebackObjectPersistentStore {
private final LinkedList<TxBundle> _pendingBundles = new LinkedList<>();
private final LinkedHashMap<Long, TxBundle> _notFlushedBundles = new LinkedHashMap<>();
private final AtomicReference<PendingWriteData> _pendingWrites = new AtomicReference<>(null);
private final Object _flushWaitSynchronizer = new Object();
private final AtomicLong _lastWrittenId = new AtomicLong(-1);
private final AtomicLong _lastCommittedId = new AtomicLong();
private final AtomicLong _waitedTotal = new AtomicLong(0);
@Inject
CachingObjectPersistentStore cachedStore;
@Inject
ExecutorService _callbackExecutor;
@ConfigProperty(name = "dhfs.objects.writeback.limit")
long sizeLimit;
private long currentSize = 0;
int sizeLimit;
private TxBundle _pendingBundle = null;
private int _curSize = 0;
private final AtomicReference<PendingWriteData> _pendingWrites = new AtomicReference<>(null);
private final ReentrantLock _pendingBundleLock = new ReentrantLock();
private final Condition _newBundleCondition = _pendingBundleLock.newCondition();
private final Condition _flushCondition = _pendingBundleLock.newCondition();
private final AtomicLong _lastFlushedId = new AtomicLong(-1);
private final AtomicLong _lastCommittedId = new AtomicLong(-1);
private final AtomicLong _waitedTotal = new AtomicLong(0);
private ExecutorService _writebackExecutor;
private ExecutorService _statusExecutor;
private volatile boolean _ready = false;
void init(@Observes @Priority(120) StartupEvent event) {
@@ -61,8 +78,8 @@ public class WritebackObjectPersistentStore {
try {
while (true) {
Thread.sleep(1000);
if (currentSize > 0)
Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB");
if (_curSize > 0)
Log.info("Tx commit status: size=" + _curSize / 1024 / 1024 + "MB");
}
} catch (InterruptedException ignored) {
}
@@ -72,7 +89,7 @@ public class WritebackObjectPersistentStore {
lastTxId = s.id();
}
_lastCommittedId.set(lastTxId);
_lastWrittenId.set(lastTxId);
_lastFlushedId.set(lastTxId);
_pendingWrites.set(new PendingWriteData(TreePMap.empty(), lastTxId, lastTxId));
_ready = true;
}
@@ -80,11 +97,14 @@ public class WritebackObjectPersistentStore {
void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException {
Log.info("Waiting for all transactions to drain");
synchronized (_flushWaitSynchronizer) {
_ready = false;
while (currentSize > 0) {
_flushWaitSynchronizer.wait();
_ready = false;
_pendingBundleLock.lock();
try {
while (_curSize > 0) {
_flushCondition.await();
}
} finally {
_pendingBundleLock.unlock();
}
_writebackExecutor.shutdownNow();
@@ -98,21 +118,19 @@ public class WritebackObjectPersistentStore {
private void writeback() {
while (!Thread.interrupted()) {
try {
TxBundle bundle = new TxBundle(0);
synchronized (_pendingBundles) {
while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready)
_pendingBundles.wait();
TxBundle bundle;
_pendingBundleLock.lock();
try {
while (_pendingBundle == null)
_newBundleCondition.await();
bundle = _pendingBundle;
_pendingBundle = null;
long diff = 0;
while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var toCompress = _pendingBundles.poll();
diff -= toCompress.size();
bundle.compress(toCompress);
}
diff += bundle.size();
synchronized (_flushWaitSynchronizer) {
currentSize += diff;
}
_curSize -= bundle.size();
assert _curSize == 0;
_flushCondition.signal();
} finally {
_pendingBundleLock.unlock();
}
var toWrite = new ArrayList<Pair<JObjectKey, JDataVersionedWrapper>>();
@@ -132,15 +150,12 @@ public class WritebackObjectPersistentStore {
}
}
cachedStore.commitTx(
new TxManifestObj<>(
Collections.unmodifiableList(toWrite),
Collections.unmodifiableList(toDelete)
), bundle.id());
cachedStore.commitTx(new TxManifestObj<>(toWrite, toDelete), bundle.id());
Log.tracev("Bundle {0} committed", bundle.id());
while (true) {
_pendingBundleLock.lock();
try {
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var e : bundle._entries.values()) {
@@ -153,25 +168,16 @@ public class WritebackObjectPersistentStore {
bundle.id(),
curPw.lastCommittedId()
);
if (_pendingWrites.compareAndSet(curPw, newCurPw))
break;
_pendingWrites.compareAndSet(curPw, newCurPw);
} finally {
_pendingBundleLock.unlock();
}
List<List<Runnable>> callbacks = new ArrayList<>();
synchronized (_notFlushedBundles) {
_lastWrittenId.set(bundle.id());
while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.id()) {
callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted());
}
}
callbacks.forEach(l -> l.forEach(Runnable::run));
synchronized (_flushWaitSynchronizer) {
currentSize -= bundle.size();
// FIXME:
if (currentSize <= sizeLimit || !_ready)
_flushWaitSynchronizer.notifyAll();
}
_lastFlushedId.set(bundle.id());
var callbacks = bundle.callbacks();
_callbackExecutor.submit(() -> {
callbacks.forEach(Runnable::run);
});
} catch (InterruptedException ignored) {
} catch (Exception e) {
Log.error("Uncaught exception in writeback", e);
@@ -184,123 +190,97 @@ public class WritebackObjectPersistentStore {
public long commitBundle(Collection<TxRecord.TxObjectRecord<?>> writes) {
verifyReady();
boolean wait = false;
while (true) {
if (wait) {
synchronized (_flushWaitSynchronizer) {
long started = System.currentTimeMillis();
while (currentSize > sizeLimit) {
try {
_flushWaitSynchronizer.wait();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
long waited = System.currentTimeMillis() - started;
_waitedTotal.addAndGet(waited);
if (Log.isTraceEnabled())
Log.tracev("Thread {0} waited for tx bundle for {1} ms", Thread.currentThread().getName(), waited);
wait = false;
}
_pendingBundleLock.lock();
try {
boolean shouldWake = false;
if (_curSize > sizeLimit) {
shouldWake = true;
long started = System.currentTimeMillis();
while (_curSize > sizeLimit)
_flushCondition.await();
long waited = System.currentTimeMillis() - started;
_waitedTotal.addAndGet(waited);
if (Log.isTraceEnabled())
Log.tracev("Thread {0} waited for tx bundle for {1} ms", Thread.currentThread().getName(), waited);
}
synchronized (_pendingBundles) {
synchronized (_flushWaitSynchronizer) {
if (currentSize > sizeLimit) {
if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var target = _pendingBundles.poll();
var oursId = _lastCommittedId.incrementAndGet();
long diff = -target.size();
while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var toCompress = _pendingBundles.poll();
diff -= toCompress.size();
target.compress(toCompress);
}
diff += target.size();
currentSize += diff;
_pendingBundles.addFirst(target);
}
var curBundle = _pendingBundle;
int oldSize = 0;
if (curBundle != null) {
oldSize = curBundle.size();
curBundle.setId(oursId);
} else {
curBundle = new TxBundle(oursId);
}
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var action : writes) {
var key = action.key();
switch (action) {
case TxRecord.TxObjectRecordWrite<?> write -> {
// Log.tracev("Flushing object {0}", write.key());
var wrapper = new JDataVersionedWrapperImpl(write.data(), oursId);
curPwMap = curPwMap.plus(key, new PendingWrite(wrapper, oursId));
curBundle.commit(wrapper);
}
if (currentSize > sizeLimit) {
wait = true;
continue;
case TxRecord.TxObjectRecordDeleted deleted -> {
// Log.tracev("Deleting object {0}", deleted.key());
curPwMap = curPwMap.plus(key, new PendingDelete(key, oursId));
curBundle.delete(key);
}
}
TxBundle bundle;
synchronized (_notFlushedBundles) {
bundle = new TxBundle(_lastCommittedId.incrementAndGet());
_pendingBundles.addLast(bundle);
_notFlushedBundles.put(bundle.id(), bundle);
}
for (var action : writes) {
switch (action) {
case TxRecord.TxObjectRecordWrite<?> write -> {
Log.tracev("Flushing object {0}", write.key());
bundle.commit(new JDataVersionedWrapperImpl(write.data(), bundle.id()));
}
case TxRecord.TxObjectRecordDeleted deleted -> {
Log.tracev("Deleting object {0}", deleted.key());
bundle.delete(deleted.key());
}
default -> {
throw new TxCommitException("Unexpected value: " + action.key());
}
}
}
while (true) {
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var e : ((TxBundle) bundle)._entries.values()) {
switch (e) {
case TxBundle.CommittedEntry c -> {
curPwMap = curPwMap.plus(c.key(), new PendingWrite(c.data, bundle.id()));
}
case TxBundle.DeletedEntry d -> {
curPwMap = curPwMap.plus(d.key(), new PendingDelete(d.key, bundle.id()));
}
default -> throw new IllegalStateException("Unexpected value: " + e);
}
}
// Now, make the changes visible to new iterators
var newCurPw = new PendingWriteData(
curPwMap,
curPw.lastFlushedId(),
bundle.id()
);
if (!_pendingWrites.compareAndSet(curPw, newCurPw))
continue;
((TxBundle) bundle).setReady();
if (_pendingBundles.peek() == bundle)
_pendingBundles.notify();
synchronized (_flushWaitSynchronizer) {
currentSize += ((TxBundle) bundle).size();
}
return bundle.id();
}
}
// Now, make the changes visible to new iterators
var newCurPw = new PendingWriteData(
curPwMap,
curPw.lastFlushedId(),
oursId
);
_pendingWrites.compareAndSet(curPw, newCurPw);
_pendingBundle = curBundle;
_newBundleCondition.signalAll();
_curSize += (curBundle.size() - oldSize);
if (shouldWake && _curSize < sizeLimit) {
_flushCondition.signal();
}
return oursId;
} catch (InterruptedException e) {
throw new RuntimeException(e);
} finally {
_pendingBundleLock.unlock();
}
}
public void asyncFence(long bundleId, Runnable fn) {
verifyReady();
if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!");
if (_lastWrittenId.get() >= bundleId) {
if (_lastFlushedId.get() >= bundleId) {
fn.run();
return;
}
synchronized (_notFlushedBundles) {
if (_lastWrittenId.get() >= bundleId) {
_pendingBundleLock.lock();
try {
if (_lastFlushedId.get() >= bundleId) {
fn.run();
return;
}
_notFlushedBundles.get(bundleId).addCallback(fn);
var pendingBundle = _pendingBundle;
if (pendingBundle == null) {
fn.run();
return;
}
pendingBundle.addCallback(fn);
} finally {
_pendingBundleLock.unlock();
}
}
@@ -341,10 +321,8 @@ public class WritebackObjectPersistentStore {
private final long txId = finalPw.lastCommittedId();
@Override
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
return TombstoneMergingKvIterator.<JObjectKey, JDataVersionedWrapper>of("writeback-ps", start, key,
(tS, tK) -> new NavigableMapKvIterator<>(_pendingWrites, tS, tK),
(tS, tK) -> (CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (CloseableKvIterator<JObjectKey, ?>) _cache.getIterator(tS, tK));
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
return ListUtils.prepend(new NavigableMapKvIterator<>(_pendingWrites, start, key), _cache.getIterator(start, key));
}
@Nonnull
@@ -381,46 +359,40 @@ public class WritebackObjectPersistentStore {
}
}
public interface VerboseReadResult {
}
private record PendingWriteData(TreePMap<JObjectKey, PendingWriteEntry> pendingWrites,
long lastFlushedId,
long lastCommittedId) {
}
private static class TxBundle {
private final LinkedHashMap<JObjectKey, BundleEntry> _entries = new LinkedHashMap<>();
private final HashMap<JObjectKey, BundleEntry> _entries = new HashMap<>();
private final ArrayList<Runnable> _callbacks = new ArrayList<>();
private int _size = 0;
private long _txId;
private volatile boolean _ready = false;
private long _size = 0;
private boolean _wasCommitted = false;
ArrayList<Runnable> callbacks() {
return _callbacks;
}
private TxBundle(long txId) {
_txId = txId;
}
public long id() {
return _txId;
public void setId(long id) {
_txId = id;
}
public void setReady() {
_ready = true;
public long id() {
return _txId;
}
public void addCallback(Runnable callback) {
synchronized (_callbacks) {
if (_wasCommitted) throw new IllegalStateException();
_callbacks.add(callback);
}
_callbacks.add(callback);
}
public List<Runnable> setCommitted() {
synchronized (_callbacks) {
_wasCommitted = true;
return Collections.unmodifiableList(_callbacks);
}
public int size() {
return _size;
}
private void putEntry(BundleEntry entry) {
@@ -439,28 +411,7 @@ public class WritebackObjectPersistentStore {
putEntry(new DeletedEntry(obj));
}
public long size() {
return _size;
}
public void compress(TxBundle other) {
if (_txId >= other._txId)
throw new IllegalArgumentException("Compressing an older bundle into newer");
_txId = other._txId;
for (var entry : other._entries.values()) {
putEntry(entry);
}
synchronized (_callbacks) {
assert !_wasCommitted;
assert !other._wasCommitted;
_callbacks.addAll(other._callbacks);
}
}
private interface BundleEntry {
private sealed interface BundleEntry permits CommittedEntry, DeletedEntry {
JObjectKey key();
int size();
@@ -478,10 +429,4 @@ public class WritebackObjectPersistentStore {
}
}
}
public record VerboseReadResultPersisted(Optional<JDataVersionedWrapper> data) implements VerboseReadResult {
}
public record VerboseReadResultPending(PendingWriteEntry pending) implements VerboseReadResult {
}
}
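A note on the reworked writeback flow above: commitBundle now merges writes into a single pending bundle under _pendingBundleLock, and asyncFence either runs a callback immediately (when the bundle is already flushed) or parks it on the pending bundle. A minimal usage sketch, assuming an injected store field named writeback and a txWrites collection (both hypothetical names):

// Hedged sketch: run work only once a given bundle is durable.
long bundleId = writeback.commitBundle(txWrites);   // changes become visible to new snapshots
writeback.asyncFence(bundleId, () -> {
    // Runs inline if the bundle already reached storage,
    // otherwise fires from the flush path once it does.
    Log.infov("Bundle {0} is durable", bundleId);
});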

View File

@@ -43,4 +43,9 @@ public class CurrentTransaction implements Transaction {
public <T extends JData> void put(JData obj) {
transactionManager.current().put(obj);
}
@Override
public <T extends JData> void putNew(JData obj) {
transactionManager.current().putNew(obj);
}
}

View File

@@ -19,7 +19,6 @@ import org.apache.commons.lang3.tuple.Pair;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;
@ApplicationScoped
public class JObjectManager {
@@ -59,97 +58,92 @@ public class JObjectManager {
verifyReady();
var writes = new HashMap<JObjectKey, TxRecord.TxObjectRecord<?>>();
Snapshot<JObjectKey, JDataVersionedWrapper> commitSnapshot = null;
Map<JObjectKey, TransactionObject<?>> readSet = null;
Map<JObjectKey, Optional<JDataVersionedWrapper>> readSet = null;
Collection<AutoCloseableNoThrow> toUnlock = null;
try {
try {
long pendingCount = 0;
List<CommitHookIterationData> hookIterationData;
{
CommitHookIterationData[] hookIterationDataArray = new CommitHookIterationData[_preCommitTxHooks.size()];
for (int i = 0; i < _preCommitTxHooks.size(); i++) {
var hook = _preCommitTxHooks.get(i);
hookIterationDataArray[i] = new CommitHookIterationData(hook, new HashMap<>(), new HashMap<>());
}
hookIterationData = List.of(hookIterationDataArray);
long pendingCount = 0;
List<CommitHookIterationData> hookIterationData;
{
CommitHookIterationData[] hookIterationDataArray = new CommitHookIterationData[_preCommitTxHooks.size()];
for (int i = 0; i < _preCommitTxHooks.size(); i++) {
var hook = _preCommitTxHooks.get(i);
hookIterationDataArray[i] = new CommitHookIterationData(hook, new HashMap<>(), new HashMap<>());
}
hookIterationData = List.of(hookIterationDataArray);
}
for (var n : tx.drainNewWrites()) {
for (var hookPut : hookIterationData) {
hookPut.pendingWrites().put(n.key(), n);
pendingCount++;
}
writes.put(n.key(), n);
for (var n : tx.drainNewWrites()) {
var key = n.key();
for (var hookPut : hookIterationData) {
hookPut.pendingWrites().put(key, n);
pendingCount++;
}
writes.put(key, n);
}
// Run hooks for all objects
// Every hook should see every change made to every object, yet the object's evolution
// should be consistent from the viewpoint of each individual hook
// For example, when a hook makes changes to an object, and another hook changes the object before/after it
// on the next iteration, the first hook should receive the version of the object it had created
// as the "old" version, and the new version with all the changes after it.
while (pendingCount > 0) {
for (var hookId : hookIterationData) {
var hook = hookId.hook();
var lastCurHookSeen = hookId.lastWrites();
Function<JObjectKey, JData> getPrev =
key -> switch (lastCurHookSeen.get(key)) {
case TxRecord.TxObjectRecordWrite<?> write -> write.data();
case TxRecord.TxObjectRecordDeleted deleted -> null;
case null -> tx.getFromSource(JData.class, key).orElse(null);
default -> {
throw new TxCommitException("Unexpected value: " + writes.get(key));
}
};
var curIteration = hookId.pendingWrites();
// Run hooks for all objects
// Every hook should see every change made to every object, yet the object's evolution
// should be consistent from the viewpoint of each individual hook
// For example, when a hook makes changes to an object, and another hook changes the object before/after it
// on the next iteration, the first hook should receive the version of the object it had created
// as the "old" version, and the new version with all the changes after it.
while (pendingCount > 0) {
for (var hookId : hookIterationData) {
var hook = hookId.hook();
var lastCurHookSeen = hookId.lastWrites();
Function<JObjectKey, JData> getPrev =
key -> switch (lastCurHookSeen.get(key)) {
case TxRecord.TxObjectRecordWrite<?> write -> write.data();
case TxRecord.TxObjectRecordDeleted deleted -> null;
case null -> tx.getFromSource(JData.class, key).orElse(null);
default -> {
throw new TxCommitException("Unexpected value: " + writes.get(key));
}
};
var curIteration = hookId.pendingWrites();
// Log.trace("Commit iteration with " + curIteration.size() + " records for hook " + hook.getClass());
for (var entry : curIteration.entrySet()) {
for (var entry : curIteration.entrySet()) {
var key = entry.getKey();
// Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.getKey());
var oldObj = getPrev.apply(entry.getKey());
lastCurHookSeen.put(entry.getKey(), entry.getValue());
switch (entry.getValue()) {
case TxRecord.TxObjectRecordWrite<?> write -> {
if (oldObj == null) {
hook.onCreate(write.key(), write.data());
} else {
hook.onChange(write.key(), oldObj, write.data());
}
var oldObj = getPrev.apply(key);
lastCurHookSeen.put(key, entry.getValue());
switch (entry.getValue()) {
case TxRecord.TxObjectRecordWrite<?> write -> {
if (oldObj == null) {
hook.onCreate(key, write.data());
} else {
hook.onChange(key, oldObj, write.data());
}
case TxRecord.TxObjectRecordDeleted deleted -> {
hook.onDelete(deleted.key(), oldObj);
}
default -> throw new TxCommitException("Unexpected value: " + entry);
}
}
pendingCount -= curIteration.size();
curIteration.clear();
for (var n : tx.drainNewWrites()) {
for (var hookPut : hookIterationData) {
if (hookPut == hookId) {
lastCurHookSeen.put(n.key(), n);
continue;
}
var before = hookPut.pendingWrites().put(n.key(), n);
if (before == null)
pendingCount++;
case TxRecord.TxObjectRecordDeleted deleted -> {
hook.onDelete(key, oldObj);
}
writes.put(n.key(), n);
default -> throw new TxCommitException("Unexpected value: " + entry);
}
}
}
} catch (Throwable e) {
for (var read : tx.reads().entrySet()) {
if (read.getValue() instanceof TransactionObjectLocked<?> locked) {
locked.lock().close();
pendingCount -= curIteration.size();
curIteration.clear();
for (var n : tx.drainNewWrites()) {
var key = n.key();
for (var hookPut : hookIterationData) {
if (hookPut == hookId) {
lastCurHookSeen.put(key, n);
continue;
}
var before = hookPut.pendingWrites().put(key, n);
if (before == null)
pendingCount++;
}
writes.put(key, n);
}
}
throw e;
}
readSet = tx.reads();
@@ -158,17 +152,16 @@ public class JObjectManager {
toUnlock = new ArrayList<>(readSet.size() + writes.size());
ArrayList<JObjectKey> toLock = new ArrayList<>(readSet.size() + writes.size());
for (var read : readSet.entrySet()) {
if (read.getValue() instanceof TransactionObjectLocked<?> locked) {
toUnlock.add(locked.lock());
} else {
toLock.add(read.getKey());
}
toLock.add(read.getKey());
}
for (var write : writes.entrySet()) {
toLock.add(write.getKey());
for (var write : writes.keySet()) {
if (!readSet.containsKey(write))
toLock.add(write);
}
Collections.sort(toLock);
toLock.sort(null);
for (var key : toLock) {
if (tx.knownNew().contains(key))
continue;
var lock = lockManager.lockObject(key);
toUnlock.add(lock);
}
@@ -180,10 +173,7 @@ public class JObjectManager {
long version = 0L;
for (var read : readSet.values()) {
version = Math.max(version, read.data().map(JDataVersionedWrapper::version).orElse(0L));
if (read instanceof TransactionObjectLocked<?> locked) {
locked.lock().close();
}
version = Math.max(version, read.map(JDataVersionedWrapper::version).orElse(0L));
}
long finalVersion = version;
@@ -191,14 +181,16 @@ public class JObjectManager {
writebackObjectPersistentStore.asyncFence(finalVersion, r);
};
var onCommit = tx.getOnCommit();
var onFlush = tx.getOnFlush();
return Pair.of(
Stream.concat(
tx.getOnCommit().stream(),
Stream.<Runnable>of(() -> {
for (var f : tx.getOnFlush())
fenceFn.accept(f);
})
).toList(),
List.of(() -> {
for (var f : onCommit)
f.run();
for (var f : onFlush)
fenceFn.accept(f);
}),
new TransactionHandle() {
@Override
public void onFlush(Runnable runnable) {
@@ -214,13 +206,13 @@ public class JObjectManager {
for (var read : readSet.entrySet()) {
var current = commitSnapshot.readObject(read.getKey());
if (current.isEmpty() != read.getValue().data().isEmpty()) {
if (current.isEmpty() != read.getValue().isEmpty()) {
Log.tracev("Checking read dependency {0} - not found", read.getKey());
throw new TxCommitException("Serialization hazard: " + current.isEmpty() + " vs " + read.getValue().data().isEmpty());
throw new TxCommitException("Serialization hazard: " + current.isEmpty() + " vs " + read.getValue().isEmpty());
}
if (current.isEmpty()) {
// TODO: Every write gets a dependency due to hooks
// Every write gets a dependency due to hooks
continue;
// assert false;
// throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().value().isEmpty());
@@ -245,7 +237,7 @@ public class JObjectManager {
}
return Pair.of(
List.copyOf(tx.getOnCommit()),
tx.getOnCommit(),
new TransactionHandle() {
@Override
public void onFlush(Runnable runnable) {
@@ -268,11 +260,6 @@ public class JObjectManager {
public void rollback(TransactionPrivate tx) {
verifyReady();
tx.reads().forEach((key, value) -> {
if (value instanceof TransactionObjectLocked<?> locked) {
locked.lock().close();
}
});
tx.close();
}
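The commit path above now takes no locks while the transaction runs (the TransactionObjectLocked branch is gone) and instead locks every touched key in one sorted pass at commit time, skipping keys the transaction created via putNew. Sorting gives all committers a single global acquisition order, which rules out AB/BA deadlocks. A self-contained illustration of that idea with plain strings and ReentrantLocks, not the repo's LockManager:

import java.util.*;
import java.util.concurrent.locks.ReentrantLock;

class OrderedLocking {
    static final Map<String, ReentrantLock> LOCKS = new HashMap<>();

    static synchronized ReentrantLock lockFor(String key) {
        return LOCKS.computeIfAbsent(key, k -> new ReentrantLock());
    }

    // Lock every key in sorted order; two threads locking {a,b} and {b,a}
    // both acquire "a" first, so neither can hold "b" while waiting on "a".
    static List<ReentrantLock> lockAll(Collection<String> keys) {
        var sorted = new ArrayList<>(keys);
        Collections.sort(sorted);
        var held = new ArrayList<ReentrantLock>();
        for (var k : sorted) {
            var l = lockFor(k);
            l.lock();
            held.add(l);
        }
        return held;
    }
}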

View File

@@ -14,6 +14,7 @@ public interface Transaction extends TransactionHandle {
<T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy);
<T extends JData> void put(JData obj);
<T extends JData> void putNew(JData obj);
void delete(JObjectKey key);
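putNew, added above, declares an object as known-new: getFromSource short-circuits to empty for it, and the commit path skips locking it (see knownNew() in JObjectManager). A hedged usage sketch; Kid is a hypothetical JData record and curTx an injected Transaction:

txm.run(() -> {
    var key = JObjectKey.of("kid-new");
    // The object was just created, so there is nothing to look up
    // in the snapshot and nothing to lock at commit time.
    curTx.putNew(new Kid(key, "fresh"));
});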

View File

@@ -6,6 +6,7 @@ import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.stores.WritebackObjectPersistentStore;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
@@ -55,12 +56,15 @@ public class TransactionFactoryImpl implements TransactionFactory {
}
private class TransactionImpl implements TransactionPrivate {
private final Map<JObjectKey, TransactionObject<?>> _readSet = new HashMap<>();
private final Map<JObjectKey, Optional<JDataVersionedWrapper>> _readSet = new HashMap<>();
private final NavigableMap<JObjectKey, TxRecord.TxObjectRecord<?>> _writes = new TreeMap<>();
private final List<Runnable> _onCommit = new ArrayList<>();
private final List<Runnable> _onFlush = new ArrayList<>();
private final List<Runnable> _onCommit = new LinkedList<>();
private final List<Runnable> _onFlush = new LinkedList<>();
private final HashSet<JObjectKey> _knownNew = new HashSet<>();
private final Snapshot<JObjectKey, JDataVersionedWrapper> _snapshot;
private boolean _closed = false;
private boolean _writeTrack = false;
private Map<JObjectKey, TxRecord.TxObjectRecord<?>> _newWrites = new HashMap<>();
private TransactionImpl() {
@@ -94,103 +98,101 @@ public class TransactionFactoryImpl implements TransactionFactory {
@Override
public <T extends JData> Optional<T> getFromSource(Class<T> type, JObjectKey key) {
return _readSet.computeIfAbsent(key, k -> {
var read = _snapshot.readObject(k);
return new TransactionObjectNoLock<>(read);
})
.data()
.map(w -> type.cast(w.data()));
}
public <T extends JData> Optional<T> getWriteLockedFromSource(Class<T> type, JObjectKey key) {
var got = _readSet.get(key);
if (got == null) {
var lock = lockManager.lockObject(key);
try {
var read = _snapshot.readObject(key);
_readSet.put(key, new TransactionObjectLocked<>(read, lock));
return read.map(JDataVersionedWrapper::data).map(type::cast);
} catch (Exception e) {
lock.close();
throw e;
}
if (_knownNew.contains(key)) {
return Optional.empty();
}
return got.data().map(JDataVersionedWrapper::data).map(type::cast);
return _readSet.computeIfAbsent(key, _snapshot::readObject)
.map(JDataVersionedWrapper::data)
.map(type::cast);
}
@Override
public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy) {
switch (_writes.get(key)) {
case TxRecord.TxObjectRecordWrite<?> write -> {
return Optional.of(type.cast(write.data()));
}
case TxRecord.TxObjectRecordDeleted deleted -> {
return Optional.empty();
}
case null, default -> {
}
}
if (neverLock)
return getFromSource(type, key);
return switch (strategy) {
case OPTIMISTIC -> getFromSource(type, key);
case WRITE -> getWriteLockedFromSource(type, key);
return switch (_writes.get(key)) {
case TxRecord.TxObjectRecordWrite<?> write -> Optional.of(type.cast(write.data()));
case TxRecord.TxObjectRecordDeleted deleted -> Optional.empty();
case null -> getFromSource(type, key);
};
}
@Override
public void delete(JObjectKey key) {
var got = _writes.get(key);
if (got != null) {
if (got instanceof TxRecord.TxObjectRecordDeleted) {
return;
}
var record = new TxRecord.TxObjectRecordDeleted(key);
if (_writes.put(key, record) instanceof TxRecord.TxObjectRecordDeleted) {
return;
}
_writes.put(key, new TxRecord.TxObjectRecordDeleted(key));
_newWrites.put(key, new TxRecord.TxObjectRecordDeleted(key));
if (_writeTrack)
_newWrites.put(key, record);
}
@Override
public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
Log.tracev("Getting tx iterator with start={0}, key={1}", start, key);
return new ReadTrackingIterator(TombstoneMergingKvIterator.<JObjectKey, ReadTrackingInternalCrap>of("tx", start, key,
(tS, tK) -> new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, tS, tK),
t -> switch (t) {
case TxRecord.TxObjectRecordWrite<?> write ->
new DataWrapper<>(new ReadTrackingInternalCrapTx(write.data()));
case TxRecord.TxObjectRecordDeleted deleted -> new TombstoneImpl<>();
case null, default -> null;
}),
(tS, tK) -> new MappingKvIterator<>(_snapshot.getIterator(tS, tK),
d -> new DataWrapper<ReadTrackingInternalCrap>(new ReadTrackingInternalCrapSource(d)))));
return new ReadTrackingIterator(new TombstoneSkippingIterator<JObjectKey, ReadTrackingInternalCrap>(start, key,
ListUtils.prependAndMap(
new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, start, key),
t -> switch (t) {
case TxRecord.TxObjectRecordWrite<?> write ->
new DataWrapper<ReadTrackingInternalCrap>(new ReadTrackingInternalCrapTx(write.data()));
case TxRecord.TxObjectRecordDeleted deleted ->
new TombstoneImpl<ReadTrackingInternalCrap>();
case null, default -> null;
}),
_snapshot.getIterator(start, key),
itin -> new MappingKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>, MaybeTombstone<ReadTrackingInternalCrap>>(itin,
d -> switch (d) {
case Data<JDataVersionedWrapper> w ->
new DataWrapper<>(new ReadTrackingInternalCrapSource(w.value()));
case Tombstone<JDataVersionedWrapper> t -> new TombstoneImpl<>();
case null, default -> null;
}))));
}
@Override
public void put(JData obj) {
var read = _readSet.get(obj.key());
if (read != null && (read.data().map(JDataVersionedWrapper::data).orElse(null) == obj)) {
var key = obj.key();
var read = _readSet.get(key);
if (read != null && (read.map(JDataVersionedWrapper::data).orElse(null) == obj)) {
return;
}
_writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
_newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
var record = new TxRecord.TxObjectRecordWrite<>(obj);
_writes.put(key, record);
if (_writeTrack)
_newWrites.put(key, record);
}
@Override
public void putNew(JData obj) {
var key = obj.key();
_knownNew.add(key);
var record = new TxRecord.TxObjectRecordWrite<>(obj);
_writes.put(key, record);
if (_writeTrack)
_newWrites.put(key, record);
}
@Override
public Collection<TxRecord.TxObjectRecord<?>> drainNewWrites() {
if (!_writeTrack) {
_writeTrack = true;
return Collections.unmodifiableCollection(_writes.values());
}
var ret = _newWrites;
_newWrites = new HashMap<>();
return ret.values();
}
@Override
public Map<JObjectKey, TransactionObject<?>> reads() {
return Collections.unmodifiableMap(_readSet);
public Map<JObjectKey, Optional<JDataVersionedWrapper>> reads() {
return _readSet;
}
@Override
public Set<JObjectKey> knownNew() {
return _knownNew;
}
@Override
@@ -226,7 +228,7 @@ public class TransactionFactoryImpl implements TransactionFactory {
public Pair<JObjectKey, JData> prev() {
var got = _backing.prev();
if (got.getValue() instanceof ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped)) {
_readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(wrapped)));
_readSet.putIfAbsent(got.getKey(), Optional.of(wrapped));
}
return Pair.of(got.getKey(), got.getValue().obj());
}
@@ -255,7 +257,7 @@ public class TransactionFactoryImpl implements TransactionFactory {
public Pair<JObjectKey, JData> next() {
var got = _backing.next();
if (got.getValue() instanceof ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped)) {
_readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(wrapped)));
_readSet.putIfAbsent(got.getKey(), Optional.of(wrapped));
}
return Pair.of(got.getKey(), got.getValue().obj());
}
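The _writeTrack flag gives drainNewWrites() two phases: the first call returns every write recorded so far and turns tracking on; each later call returns only the writes made since the previous drain. That is what lets JObjectManager keep feeding pre-commit hooks until a drain comes back empty. A sketch of the contract (runHooks is a hypothetical stand-in for the hook loop):

var batch = tx.drainNewWrites();     // first call: all writes so far, tracking starts
while (!batch.isEmpty()) {
    runHooks(batch);                 // hooks may call tx.put(...) again
    batch = tx.drainNewWrites();     // later calls: only writes since the last drain
}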

View File

@@ -12,8 +12,8 @@ public interface TransactionManager {
void rollback();
default <T> T runTries(Supplier<T> supplier, int tries) {
if (current() != null) {
default <T> T runTries(Supplier<T> supplier, int tries, boolean nest) {
if (!nest && current() != null) {
return supplier.get();
}
@@ -41,8 +41,8 @@ public interface TransactionManager {
}
}
default TransactionHandle runTries(VoidFn fn, int tries) {
if (current() != null) {
default TransactionHandle runTries(VoidFn fn, int tries, boolean nest) {
if (!nest && current() != null) {
fn.apply();
return new TransactionHandle() {
@Override
@@ -74,23 +74,38 @@ public interface TransactionManager {
throw e;
}
}
}
default <T> T runTries(Supplier<T> supplier, int tries) {
return runTries(supplier, tries, false);
}
default TransactionHandle runTries(VoidFn fn, int tries) {
return runTries(fn, tries, false);
}
default TransactionHandle run(VoidFn fn, boolean nest) {
return runTries(fn, 10, nest);
}
default <T> T run(Supplier<T> supplier, boolean nest) {
return runTries(supplier, 10, nest);
}
default TransactionHandle run(VoidFn fn) {
return runTries(fn, 10);
return run(fn, false);
}
default <T> T run(Supplier<T> supplier) {
return runTries(supplier, 10);
return run(supplier, false);
}
default void executeTx(VoidFn fn) {
run(fn);
run(fn, false);
}
default <T> T executeTx(Supplier<T> supplier) {
return run(supplier);
return run(supplier, false);
}
Transaction current();
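With the nest parameter added above, run(fn) keeps the old join-the-current-transaction behavior, while run(fn, true) opens a fresh transaction even inside an open one, backed by the thread-local stack in TransactionManagerImpl below. A hedged usage sketch (txm, curTx, parent and child are illustrative names):

txm.run(() -> {
    curTx.put(parent);          // outer transaction
    txm.run(() -> {
        curTx.put(child);       // inner transaction: commits or rolls back on its own
    }, true);
});                             // outer transaction commits afterwards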

View File

@@ -1,46 +1,48 @@
package com.usatiuk.objects.transaction;
import io.quarkus.logging.Log;
import jakarta.annotation.Nullable;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import org.apache.commons.lang3.tuple.Pair;
import java.util.Collection;
import java.util.Stack;
@Singleton
public class TransactionManagerImpl implements TransactionManager {
private static final ThreadLocal<TransactionPrivate> _currentTransaction = new ThreadLocal<>();
private static final ThreadLocal<Stack<TransactionPrivate>> _currentTransaction = ThreadLocal.withInitial(Stack::new);
@Inject
JObjectManager jObjectManager;
@Override
public void begin() {
if (_currentTransaction.get() != null) {
throw new IllegalStateException("Transaction already started");
}
Log.trace("Starting transaction");
var tx = jObjectManager.createTransaction();
_currentTransaction.set(tx);
_currentTransaction.get().push(tx);
}
@Override
public TransactionHandle commit() {
if (_currentTransaction.get() == null) {
var stack = _currentTransaction.get();
if (stack.empty()) {
throw new IllegalStateException("No transaction started");
}
var peeked = stack.peek();
Log.trace("Committing transaction");
Pair<Collection<Runnable>, TransactionHandle> ret;
try {
ret = jObjectManager.commit(_currentTransaction.get());
ret = jObjectManager.commit(peeked);
} catch (Throwable e) {
Log.trace("Transaction commit failed", e);
throw e;
} finally {
_currentTransaction.get().close();
_currentTransaction.remove();
peeked.close();
stack.pop();
if (stack.empty())
_currentTransaction.remove();
}
for (var r : ret.getLeft()) {
@@ -55,24 +57,33 @@ public class TransactionManagerImpl implements TransactionManager {
@Override
public void rollback() {
if (_currentTransaction.get() == null) {
var stack = _currentTransaction.get();
if (stack.empty()) {
throw new IllegalStateException("No transaction started");
}
var peeked = stack.peek();
try {
jObjectManager.rollback(_currentTransaction.get());
jObjectManager.rollback(peeked);
} catch (Throwable e) {
Log.error("Transaction rollback failed", e);
throw e;
} finally {
_currentTransaction.get().close();
_currentTransaction.remove();
peeked.close();
stack.pop();
if (stack.empty())
_currentTransaction.remove();
}
}
@Override
@Nullable
public Transaction current() {
return _currentTransaction.get();
var stack = _currentTransaction.get();
if (stack.empty()) {
return null;
}
return stack.peek();
}
}

View File

@@ -1,10 +0,0 @@
package com.usatiuk.objects.transaction;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import java.util.Optional;
public interface TransactionObject<T extends JData> {
Optional<JDataVersionedWrapper> data();
}

View File

@@ -1,12 +0,0 @@
package com.usatiuk.objects.transaction;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.utils.AutoCloseableNoThrow;
import java.util.Optional;
public record TransactionObjectLocked<T extends JData>
(Optional<JDataVersionedWrapper> data, AutoCloseableNoThrow lock)
implements TransactionObject<T> {
}

View File

@@ -1,11 +0,0 @@
package com.usatiuk.objects.transaction;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import java.util.Optional;
public record TransactionObjectNoLock<T extends JData>
(Optional<JDataVersionedWrapper> data)
implements TransactionObject<T> {
}

View File

@@ -9,12 +9,15 @@ import com.usatiuk.utils.AutoCloseableNoThrow;
import java.util.Collection;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
// The transaction interface actually used by user code to retrieve objects
public interface TransactionPrivate extends Transaction, TransactionHandlePrivate, AutoCloseableNoThrow {
Collection<TxRecord.TxObjectRecord<?>> drainNewWrites();
Map<JObjectKey, TransactionObject<?>> reads();
Map<JObjectKey, Optional<JDataVersionedWrapper>> reads();
Set<JObjectKey> knownNew();
<T extends JData> Optional<T> getFromSource(Class<T> type, JObjectKey key);

View File

@@ -4,7 +4,7 @@ import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
public class TxRecord {
public interface TxObjectRecord<T> {
public sealed interface TxObjectRecord<T> permits TxObjectRecordWrite, TxObjectRecordDeleted {
JObjectKey key();
}
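Sealing TxObjectRecord (like BundleEntry earlier) lets the compiler check switches over records for exhaustiveness, so the defensive default arms can eventually go away. A minimal sketch:

static JObjectKey recordKey(TxRecord.TxObjectRecord<?> rec) {
    // No default arm needed: the sealed hierarchy admits exactly two cases.
    return switch (rec) {
        case TxRecord.TxObjectRecordWrite<?> write -> write.key();
        case TxRecord.TxObjectRecordDeleted deleted -> deleted.key();
    };
}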

View File

@@ -1,10 +1,12 @@
dhfs.objects.persistence=lmdb
dhfs.objects.writeback.limit=134217728
dhfs.objects.lru.limit=134217728
dhfs.objects.writeback.limit=16777216
dhfs.objects.lru.limit=67108864
dhfs.objects.lru.print-stats=false
dhfs.objects.lock_timeout_secs=15
dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs
dhfs.objects.persistence.snapshot-extra-checks=false
dhfs.objects.transaction.never-lock=true
dhfs.objects.last-seen.update=60
dhfs.objects.last-seen.timeout=43200
quarkus.log.category."com.usatiuk.objects.iterators".level=INFO
quarkus.log.category."com.usatiuk.objects.iterators".min-level=INFO
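For reference, the new limits are plain powers of two and the last-seen settings are in seconds; a quick check of the arithmetic:

// 16777216 B = 16 MiB writeback limit (was 134217728 B = 128 MiB)
long writebackLimit = 16L * 1024 * 1024;
// 67108864 B = 64 MiB LRU limit (was 128 MiB)
long lruLimit = 64L * 1024 * 1024;
// last-seen: update every 60 s, kick out after 43200 s = 12 h
long lastSeenTimeoutSecs = 12L * 60 * 60;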

View File

@@ -1,5 +1,6 @@
package com.usatiuk.objects.iterators;
import jnr.ffi.annotations.In;
import net.jqwik.api.*;
import net.jqwik.api.state.Action;
import net.jqwik.api.state.ActionChain;
@@ -9,6 +10,7 @@ import org.junit.jupiter.api.Assertions;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.BiConsumer;
public class MergingKvIteratorPbtTest {
@Property
@@ -54,8 +56,8 @@ public class MergingKvIteratorPbtTest {
}
}
mergedIterator = new NavigableMapKvIterator<>(perfectMerged, startType, startKey);
mergingIterator = new MergingKvIterator<>("test", startType, startKey, pairs.stream().<IterProdFn<Integer, Integer>>map(
list -> (IteratorStart start, Integer key) -> new NavigableMapKvIterator<>(new TreeMap<Integer, Integer>(Map.ofEntries(list.toArray(Map.Entry[]::new))), start, key)
mergingIterator = new MergingKvIterator<>(startType, startKey, pairs.stream().<CloseableKvIterator<Integer, Integer>>map(
list -> new NavigableMapKvIterator<>(new TreeMap<Integer, Integer>(Map.ofEntries(list.toArray(Map.Entry[]::new))), startType, startKey)
).toList());
}

View File

@@ -1,348 +0,0 @@
package com.usatiuk.objects.iterators;
import com.usatiuk.objects.Just;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.pcollections.TreePMap;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
public class MergingKvIteratorTest {
@Test
public void testTestIterator() {
var list = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6));
var iterator = new SimpleIteratorWrapper<>(list.iterator());
var realIterator = list.iterator();
while (realIterator.hasNext()) {
Assertions.assertTrue(iterator.hasNext());
Assertions.assertEquals(realIterator.next(), iterator.next());
}
Assertions.assertFalse(iterator.hasNext());
var emptyList = List.<Pair<Integer, Integer>>of();
var emptyIterator = new SimpleIteratorWrapper<>(emptyList.iterator());
Assertions.assertFalse(emptyIterator.hasNext());
}
@Test
public void testSimple() {
var source1 = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6)).iterator();
var source2 = List.of(Pair.of(2, 3), Pair.of(4, 5), Pair.of(6, 7)).iterator();
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1), (a, b) -> new SimpleIteratorWrapper<>(source2));
var expected = List.of(Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4), Pair.of(4, 5), Pair.of(5, 6), Pair.of(6, 7));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
}
@Test
public void testPriority() {
var source1 = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6));
var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7));
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()));
var expected = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()));
var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}
@Test
public void testPriority2() {
var source1 = List.of(Pair.of(2, 4), Pair.of(5, 6));
var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5));
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()));
var expected = List.of(Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()));
var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}
@Test
public void testPriorityLe() {
var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(5, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
Just.checkIterator(mergingIterator.reversed(), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3));
Assertions.assertFalse(mergingIterator.reversed().hasNext());
Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6));
Assertions.assertFalse(mergingIterator.hasNext());
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(5, 6));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
Just.checkIterator(mergingIterator2.reversed(), Pair.of(5, 6), Pair.of(2, 5), Pair.of(1, 3));
Assertions.assertFalse(mergingIterator2.reversed().hasNext());
Just.checkIterator(mergingIterator2, Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6));
Assertions.assertFalse(mergingIterator2.hasNext());
var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
}
@Test
public void testPriorityLe2() {
var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(5, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
}
@Test
public void testPriorityLe3() {
var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(6, 8);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(5, 6), Pair.of(6, 8));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
Just.checkIterator(mergingIterator.reversed(), Pair.of(6, 8), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3));
Assertions.assertFalse(mergingIterator.reversed().hasNext());
Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6), Pair.of(6, 8));
Assertions.assertFalse(mergingIterator.hasNext());
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(5, 6), Pair.of(6, 8));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
Assertions.assertTrue(mergingIterator3.hasPrev());
Assertions.assertTrue(mergingIterator3.hasNext());
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
}
@Test
public void testPriorityLe4() {
var source1 = TreePMap.<Integer, Integer>empty().plus(6, 7);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(3, 4), Pair.of(6, 7));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}
@Test
public void testPriorityLe5() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 2).plus(6, 7);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(3, 4), Pair.of(6, 7));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}
@Test
public void testPriorityLe6() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
var source2 = TreePMap.<Integer, Integer>empty().plus(4, 6);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(4, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(4, 6));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}
@Test
public void testPriorityLe7() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 4).plus(3, 5).plus(4, 6);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
Just.checkIterator(mergingIterator.reversed(), Pair.of(4, 6), Pair.of(3, 5), Pair.of(1, 3));
Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6));
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(1, 4), Pair.of(3, 5), Pair.of(4, 6));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}
@Test
public void testPriorityLt() {
var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(2, 4), Pair.of(5, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(2, 5), Pair.of(5, 6));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}
private class SimpleIteratorWrapper<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
private final Iterator<Pair<K, V>> _iterator;
private Pair<K, V> _next;
public SimpleIteratorWrapper(Iterator<Pair<K, V>> iterator) {
_iterator = iterator;
fillNext();
}
private void fillNext() {
while (_iterator.hasNext() && _next == null) {
_next = _iterator.next();
}
}
@Override
public K peekNextKey() {
if (_next == null) {
throw new NoSuchElementException();
}
return _next.getKey();
}
@Override
public void skip() {
if (_next == null) {
throw new NoSuchElementException();
}
_next = null;
fillNext();
}
@Override
public K peekPrevKey() {
throw new UnsupportedOperationException();
}
@Override
public Pair<K, V> prev() {
throw new UnsupportedOperationException();
}
@Override
public boolean hasPrev() {
throw new UnsupportedOperationException();
}
@Override
public void skipPrev() {
throw new UnsupportedOperationException();
}
@Override
public void close() {
}
@Override
public boolean hasNext() {
return _next != null;
}
@Override
public Pair<K, V> next() {
if (_next == null) {
throw new NoSuchElementException("No more elements");
}
var ret = _next;
_next = null;
fillNext();
return ret;
}
}
}

View File

@@ -1,161 +0,0 @@
package com.usatiuk.objects.iterators;
import com.usatiuk.objects.Just;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.pcollections.TreePMap;
import java.util.List;
public class PredicateKvIteratorTest {
@Test
public void simpleTest() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.GT, 1),
IteratorStart.GE, 1, v -> (v % 2 == 0) ? v : null);
var expected = List.of(Pair.of(4, 6));
for (var pair : expected) {
Assertions.assertTrue(pit.hasNext());
Assertions.assertEquals(pair, pit.next());
}
}
@Test
public void ltTest() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
var expected = List.of(Pair.of(4, 6));
for (var pair : expected) {
Assertions.assertTrue(pit.hasNext());
Assertions.assertEquals(pair, pit.next());
}
Assertions.assertFalse(pit.hasNext());
}
@Test
public void ltTest2() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 1),
IteratorStart.LT, 1, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(4, 6));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 2),
IteratorStart.LT, 2, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(4, 6));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(4, 6));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 4),
IteratorStart.LE, 4, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(4, 6));
Assertions.assertFalse(pit.hasNext());
}
@Test
public void ltTest3() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 7).plus(6, 8);
var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7),
IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(6, 8));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 8),
IteratorStart.LT, 8, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(6, 8));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 6),
IteratorStart.LE, 6, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(6, 8));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
Assertions.assertTrue(pit.hasNext());
Assertions.assertEquals(4, pit.peekNextKey());
Assertions.assertFalse(pit.hasPrev());
Assertions.assertEquals(4, pit.peekNextKey());
Assertions.assertFalse(pit.hasPrev());
Assertions.assertEquals(Pair.of(4, 6), pit.next());
Assertions.assertTrue(pit.hasNext());
Assertions.assertEquals(6, pit.peekNextKey());
Assertions.assertEquals(4, pit.peekPrevKey());
Assertions.assertEquals(6, pit.peekNextKey());
Assertions.assertEquals(4, pit.peekPrevKey());
}
@Test
public void itTest4() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 8).plus(6, 10);
var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(5, 8), Pair.of(6, 10));
Assertions.assertFalse(pit.hasNext());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7),
IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null);
Just.checkIterator(pit, Pair.of(6, 10));
Assertions.assertFalse(pit.hasNext());
Assertions.assertTrue(pit.hasPrev());
Assertions.assertEquals(6, pit.peekPrevKey());
Assertions.assertEquals(Pair.of(6, 10), pit.prev());
Assertions.assertTrue(pit.hasNext());
Assertions.assertEquals(6, pit.peekNextKey());
pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
Assertions.assertTrue(pit.hasNext());
Assertions.assertEquals(5, pit.peekNextKey());
Assertions.assertTrue(pit.hasPrev());
Assertions.assertEquals(4, pit.peekPrevKey());
Assertions.assertEquals(5, pit.peekNextKey());
Assertions.assertEquals(4, pit.peekPrevKey());
Assertions.assertEquals(Pair.of(5, 8), pit.next());
Assertions.assertTrue(pit.hasNext());
Assertions.assertEquals(6, pit.peekNextKey());
Assertions.assertEquals(5, pit.peekPrevKey());
Assertions.assertEquals(6, pit.peekNextKey());
Assertions.assertEquals(5, pit.peekPrevKey());
}
// @Test
// public void reverseTest() {
// var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
// var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
// IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
//
// }
}

View File

@@ -0,0 +1,275 @@
package com.usatiuk.objects.iterators;
import net.jqwik.api.*;
import net.jqwik.api.state.Action;
import net.jqwik.api.state.ActionChain;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
public class TombstoneSkippingIteratorPbtTest {
@Property
public void checkMergingIterator(@ForAll("actions") ActionChain<MergingIteratorModel> actions) {
actions.run();
}
@Provide
Arbitrary<ActionChain<MergingIteratorModel>> actions(@ForAll("lists") List<List<Map.Entry<Integer, MaybeTombstone<Integer>>>> list,
@ForAll IteratorStart iteratorStart, @ForAll("startKey") Integer startKey) {
return ActionChain.startWith(() -> new MergingIteratorModel(list, iteratorStart, startKey))
.withAction(new NextAction())
.withAction(new PeekNextKeyAction())
.withAction(new SkipAction())
.withAction(new PeekPrevKeyAction())
.withAction(new SkipPrevAction())
.withAction(new PrevAction())
.withAction(new HasNextAction())
.withAction(new HasPrevAction());
}
@Provide
Arbitrary<List<List<Map.Entry<Integer, MaybeTombstone<Integer>>>>> lists() {
return Arbitraries.entries(Arbitraries.integers().between(-50, 50),
Arbitraries.integers().between(-50, 50).flatMap(i -> Arbitraries.of(true, false).<MaybeTombstone<Integer>>flatMap(
b -> b ? Arbitraries.just(new DataWrapper<Integer>(i)) : Arbitraries.just(new TombstoneImpl<>())
))
)
.list().uniqueElements(Map.Entry::getKey).ofMinSize(0).ofMaxSize(20)
.list().ofMinSize(1).ofMaxSize(5);
}
@Provide
Arbitrary<Integer> startKey() {
return Arbitraries.integers().between(-51, 51);
}
static class MergingIteratorModel implements CloseableKvIterator<Integer, Integer> {
private final CloseableKvIterator<Integer, Integer> mergedIterator;
private final CloseableKvIterator<Integer, Integer> mergingIterator;
private MergingIteratorModel(List<List<Map.Entry<Integer, MaybeTombstone<Integer>>>> pairs, IteratorStart startType, Integer startKey) {
TreeMap<Integer, MaybeTombstone<Integer>> perfectMergedTombstones = new TreeMap<>();
for (List<Map.Entry<Integer, MaybeTombstone<Integer>>> list : pairs) {
for (Map.Entry<Integer, MaybeTombstone<Integer>> pair : list) {
perfectMergedTombstones.putIfAbsent(pair.getKey(), pair.getValue());
}
}
TreeMap<Integer, Integer> perfectMerged = new TreeMap<>();
for (var e : perfectMergedTombstones.entrySet()) {
if (e.getValue() instanceof Data<Integer> data)
perfectMerged.put(e.getKey(), data.value());
}
mergedIterator = new NavigableMapKvIterator<>(perfectMerged, startType, startKey);
mergingIterator = new TombstoneSkippingIterator<>(startType, startKey, pairs.stream().<CloseableKvIterator<Integer, MaybeTombstone<Integer>>>map(
list -> new NavigableMapKvIterator<Integer, MaybeTombstone<Integer>>(new TreeMap<Integer, MaybeTombstone<Integer>>(Map.ofEntries(list.toArray(Map.Entry[]::new))), startType, startKey)
).toList());
}
@Override
public Integer peekNextKey() {
var mergedKey = mergedIterator.peekNextKey();
var mergingKey = mergingIterator.peekNextKey();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}
@Override
public void skip() {
mergedIterator.skip();
mergingIterator.skip();
}
@Override
public Integer peekPrevKey() {
var mergedKey = mergedIterator.peekPrevKey();
var mergingKey = mergingIterator.peekPrevKey();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}
@Override
public Pair<Integer, Integer> prev() {
var mergedKey = mergedIterator.prev();
var mergingKey = mergingIterator.prev();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}
@Override
public boolean hasPrev() {
var mergedKey = mergedIterator.hasPrev();
var mergingKey = mergingIterator.hasPrev();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}
@Override
public void skipPrev() {
mergedIterator.skipPrev();
mergingIterator.skipPrev();
}
@Override
public void close() {
mergedIterator.close();
mergingIterator.close();
}
@Override
public boolean hasNext() {
var mergedKey = mergedIterator.hasNext();
var mergingKey = mergingIterator.hasNext();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}
@Override
public Pair<Integer, Integer> next() {
var mergedKey = mergedIterator.next();
var mergingKey = mergingIterator.next();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}
}
static class PeekNextKeyAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.peekNextKey();
}
@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasNext();
}
@Override
public String description() {
return "Peek next key";
}
}
static class SkipAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.skip();
}
@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasNext();
}
@Override
public String description() {
return "Skip next key";
}
}
static class PeekPrevKeyAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.peekPrevKey();
}
@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasPrev();
}
@Override
public String description() {
return "Peek prev key";
}
}
static class SkipPrevAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.skipPrev();
}
@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasPrev();
}
@Override
public String description() {
return "Skip prev key";
}
}
static class PrevAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.prev();
}
@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasPrev();
}
@Override
public String description() {
return "Prev key";
}
}
static class NextAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.next();
}
@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasNext();
}
@Override
public String description() {
return "Next key";
}
}
static class HasNextAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.hasNext();
}
@Override
public boolean precondition(MergingIteratorModel state) {
return true;
}
@Override
public String description() {
return "Has next key";
}
}
static class HasPrevAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.hasPrev();
}
@Override
public boolean precondition(MergingIteratorModel state) {
return true;
}
@Override
public String description() {
return "Has prev key";
}
}
}

View File

@@ -1,130 +0,0 @@
package com.usatiuk.objects.stores;
import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.Just;
import com.usatiuk.objects.TempDataProfile;
import com.usatiuk.objects.iterators.IteratorStart;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.RepeatedTest;
import java.nio.ByteBuffer;
import java.util.List;
class Profiles {
public static class LmdbKvIteratorTestProfile extends TempDataProfile {
}
}
@QuarkusTest
@TestProfile(Profiles.LmdbKvIteratorTestProfile.class)
public class LmdbKvIteratorTest {
@Inject
LmdbObjectPersistentStore store;
long getNextTxId() {
try (var s = store.getSnapshot()) {
return s.id() + 1;
}
}
@RepeatedTest(100)
public void iteratorTest1() {
store.prepareTx(
new TxManifestRaw(
List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})),
Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})),
Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))),
List.of()
), getNextTxId()
).run();
try (var snapshot = store.getSnapshot()) {
var iterator = snapshot.getIterator(IteratorStart.GE, JObjectKey.of(""));
Just.checkIterator(iterator, List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteBuffer.wrap(new byte[]{2})),
Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})),
Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4}))));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(3)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(2)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(2)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(3)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(2)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteBuffer.wrap(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(1)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteBuffer.wrap(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(1)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteBuffer.wrap(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(3)));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(4)));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(0)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteBuffer.wrap(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = snapshot.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2)));
Assertions.assertTrue(iterator.hasNext());
Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey());
Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey());
Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey());
Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey());
Just.checkIterator(iterator.reversed(), Pair.of(JObjectKey.of(Long.toString(1)), ByteBuffer.wrap(new byte[]{2})));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteBuffer.wrap(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})));
Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(3)), ByteBuffer.wrap(new byte[]{4})), iterator.prev());
Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), iterator.prev());
Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteBuffer.wrap(new byte[]{3})), iterator.next());
iterator.close();
}
store.prepareTx(new TxManifestRaw(
List.of(),
List.of(JObjectKey.of(Long.toString(1)), JObjectKey.of(Long.toString(2)), JObjectKey.of(Long.toString(3)))
),
getNextTxId()
).run();
}
}

View File

@@ -14,7 +14,6 @@
<module>sync-base</module>
<module>dhfs-fs</module>
<module>dhfs-fuse</module>
<module>dhfs-app</module>
<module>kleppmanntree</module>
<module>objects</module>
<module>utils</module>
@@ -102,6 +101,7 @@
<arg>-parameters</arg>
<arg>--add-exports</arg>
<arg>java.base/jdk.internal.access=ALL-UNNAMED</arg>
<arg>--enable-preview</arg>
</compilerArgs>
</configuration>
</plugin>
@@ -119,6 +119,7 @@
--add-exports java.base/sun.nio.ch=ALL-UNNAMED
--add-exports java.base/jdk.internal.access=ALL-UNNAMED
--add-opens=java.base/java.nio=ALL-UNNAMED
--enable-preview
</argLine>
<skipTests>${skip.unit}</skipTests>
<redirectTestOutputToFile>true</redirectTestOutputToFile>

View File

@@ -19,8 +19,6 @@ import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -56,27 +54,21 @@ public class AutosyncProcessor {
if (downloadAll)
executorService.submit(() -> {
Log.info("Adding all to autosync");
List<JObjectKey> objs = new LinkedList<>();
txm.run(() -> {
try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
while (it.hasNext()) {
var key = it.peekNextKey();
objs.add(key);
// TODO: Nested transactions
txm.run(() -> {
var gotObj = curTx.get(JData.class, key).orElse(null);
if (!(gotObj instanceof RemoteObjectMeta meta))
return;
if (!meta.hasLocalData())
add(meta.key());
}, true);
it.skip();
}
}
});
for (var obj : objs) {
txm.run(() -> {
var gotObj = curTx.get(JData.class, obj).orElse(null);
if (!(gotObj instanceof RemoteObjectMeta meta))
return;
if (!meta.hasLocalData())
add(meta.key());
});
}
Log.info("Adding all to autosync: finished");
});
}
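The rewrite above replaces the two-pass scan (collect all keys, then reprocess each in a separate transaction) with nested transactions opened directly inside the iteration. A condensed sketch of the new pattern, assuming txm.run(Runnable, boolean) opens a nested transaction when the flag is true; process(key) is a hypothetical placeholder for the per-object work:

txm.run(() -> {
    try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
        while (it.hasNext()) {
            var key = it.peekNextKey();
            // The 'true' flag runs the body as a nested transaction, so each
            // object commits in its own scope without a second pass over the keys.
            txm.run(() -> process(key), true);
            it.skip();
        }
    }
});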

View File

@@ -1,13 +1,14 @@
package com.usatiuk.dhfs.invalidation;
import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import org.pcollections.PMap;
import java.util.Collection;
import java.util.List;
public record IndexUpdateOp(JObjectKey key, PMap<PeerId, Long> changelog) implements Op {
public record IndexUpdateOp(JObjectKey key, PMap<PeerId, Long> changelog, JDataRemoteDto data) implements Op {
@Override
public Collection<JObjectKey> getEscapedRefs() {
return List.of(key);

View File

@@ -1,10 +1,14 @@
package com.usatiuk.dhfs.invalidation;
import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.dhfs.remoteobj.JDataRemotePush;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfs.syncmap.DtoMapperService;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
@@ -19,11 +23,22 @@ public class RemoteObjectMetaOpExtractor implements OpExtractor<RemoteObjectMeta
Transaction curTx;
@Inject
RemoteTransaction remoteTransaction;
@Inject
DtoMapperService dtoMapperService;
@Override
public Pair<List<Op>, Runnable> extractOps(RemoteObjectMeta data, PeerId peerId) {
return txm.run(() -> {
return Pair.of(List.of(new IndexUpdateOp(data.key(), data.changelog())), () -> {
JDataRemoteDto dto =
data.knownType().isAnnotationPresent(JDataRemotePush.class)
? remoteTransaction.getData(data.knownType(), data.key())
.map(d -> dtoMapperService.toDto(d, d.dtoClass())).orElse(null)
: null;
if (data.knownType().isAnnotationPresent(JDataRemotePush.class) && dto == null) {
Log.warnv("Failed to get data for push {0} of type {1}", data.key(), data.knownType());
}
return Pair.of(List.of(new IndexUpdateOp(data.key(), data.changelog(), dto)), () -> {
});
});
}

View File

@@ -24,6 +24,6 @@ public class JKleppmannTreePeerInterface implements PeerInterface<PeerId> {
@Override
public Collection<PeerId> getAllPeers() {
return peerInfoService.getPeers().stream().map(PeerInfo::id).toList();
return peerInfoService.getSynchronizedPeers().stream().map(PeerInfo::id).toList();
}
}

View File

@@ -4,24 +4,48 @@ import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.peertrust.CertificateTools;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.dhfs.remoteobj.JDataRemotePush;
import com.usatiuk.objects.JObjectKey;
import org.pcollections.HashTreePMap;
import org.pcollections.PMap;
import java.security.cert.X509Certificate;
public record PeerInfo(JObjectKey key, PeerId id, ByteString cert) implements JDataRemote, JDataRemoteDto {
@JDataRemotePush
public record PeerInfo(JObjectKey key, PeerId id, ByteString cert,
PMap<PeerId, Long> kickCounter,
long lastSeenTimestamp) implements JDataRemote, JDataRemoteDto {
public PeerInfo(PeerId id, byte[] cert) {
this(id.toJObjectKey(), id, ByteString.copyFrom(cert));
this(id.toJObjectKey(), id, ByteString.copyFrom(cert), HashTreePMap.empty(), System.currentTimeMillis());
}
public X509Certificate parsedCert() {
return CertificateTools.certFromBytes(cert.toByteArray());
}
public PeerInfo withKickCounter(PMap<PeerId, Long> kickCounter) {
return new PeerInfo(key, id, cert, kickCounter, lastSeenTimestamp);
}
public PeerInfo withIncrementedKickCounter(PeerId peerId) {
return new PeerInfo(key, id, cert, kickCounter.plus(peerId, kickCounter.getOrDefault(peerId, 0L) + 1), lastSeenTimestamp);
}
public PeerInfo withLastSeenTimestamp(long lastSeenTimestamp) {
return new PeerInfo(key, id, cert, kickCounter, lastSeenTimestamp);
}
public long kickCounterSum() {
return kickCounter.values().stream().mapToLong(Long::longValue).sum();
}
@Override
public String toString() {
return "PeerInfo{" +
"key=" + key +
", id=" + id +
", kickCounter=" + kickCounter +
", lastSeenTimestamp=" + lastSeenTimestamp +
'}';
}
}
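To make the kick counter concrete, a hedged illustration (peerId, otherPeer and certBytes are placeholders): each peer only ever increments its own slot, and kickCounterSum() is the aggregate that the pre-commit hook later compares to detect a kick.

PeerInfo info = new PeerInfo(peerId, certBytes);              // kickCounter starts empty
PeerInfo kicked = info.withIncrementedKickCounter(otherPeer); // {otherPeer=1}
assert kicked.kickCounterSum() == info.kickCounterSum() + 1;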

View File

@@ -87,17 +87,27 @@ public class PeerInfoService {
public List<PeerInfo> getPeersNoSelf() {
return jObjectTxManager.run(() -> {
var gotKey = getTreeR().traverse(List.of());
return curTx.get(JKleppmannTreeNodeHolder.class, gotKey).map(JKleppmannTreeNodeHolder::node).map(
node -> node.children().keySet().stream()
.map(JObjectKey::of).map(this::getPeerInfoImpl)
.filter(o -> {
if (o.isEmpty())
Log.warnv("Could not get peer info for peer {0}", o);
return o.isPresent();
}).map(Optional::get).filter(
peerInfo -> !peerInfo.id().equals(persistentPeerDataService.getSelfUuid())).toList())
.orElseThrow();
return getPeers().stream().filter(
peerInfo -> !peerInfo.id().equals(persistentPeerDataService.getSelfUuid())).toList();
});
}
public List<PeerInfo> getSynchronizedPeers() {
return jObjectTxManager.run(() -> {
return getPeers().stream().filter(pi -> {
if (pi.id().equals(persistentPeerDataService.getSelfUuid())) {
return true;
}
return persistentPeerDataService.isInitialSyncDone(pi.id());
}).toList();
});
}
public List<PeerInfo> getSynchronizedPeersNoSelf() {
return jObjectTxManager.run(() -> {
return getPeersNoSelf().stream().filter(pi -> {
return persistentPeerDataService.isInitialSyncDone(pi.id());
}).toList();
});
}

View File

@@ -89,10 +89,11 @@ public class PeerInfoSyncHandler implements ObjSyncHandler<PeerInfo, PeerInfo> {
if (oursCurData == null)
throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
if (!receivedData.equals(oursCurData))
throw new StatusRuntimeException(Status.ABORTED.withDescription("PeerInfo data conflict"));
if (!receivedData.cert().equals(oursCurData.cert()))
throw new StatusRuntimeException(Status.ABORTED.withDescription("PeerInfo certificate conflict for " + key));
HashPMap<PeerId, Long> newChangelog = HashTreePMap.from(current.changelog());
HashPMap<PeerId, Long> newKickCounter = HashTreePMap.from(oursCurData.kickCounter());
for (var entry : receivedChangelog.entrySet()) {
newChangelog = newChangelog.plus(entry.getKey(),
@@ -100,6 +101,20 @@ public class PeerInfoSyncHandler implements ObjSyncHandler<PeerInfo, PeerInfo> {
);
}
for (var entry : receivedData.kickCounter().entrySet()) {
newKickCounter = newKickCounter.plus(entry.getKey(),
Long.max(newKickCounter.getOrDefault(entry.getKey(), 0L), entry.getValue())
);
}
var newData = oursCurData.withKickCounter(newKickCounter)
.withLastSeenTimestamp(Math.max(oursCurData.lastSeenTimestamp(), receivedData.lastSeenTimestamp()));
if (!newData.equals(oursCurData))
newChangelog = newChangelog.plus(persistentPeerDataService.getSelfUuid(), newChangelog.getOrDefault(persistentPeerDataService.getSelfUuid(), 0L) + 1L);
remoteTx.putDataRaw(newData);
current = current.withChangelog(newChangelog);
}
}
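The kick-counter merge above is the usual grow-only counter join: take the per-key maximum, so concurrent increments on different peers never regress each other. A standalone sketch of that step, with the same types the handler uses:

static HashPMap<PeerId, Long> mergeKickCounters(HashPMap<PeerId, Long> ours,
                                                Map<PeerId, Long> theirs) {
    var merged = ours;
    for (var e : theirs.entrySet()) {
        // Per-key max keeps each peer's slot monotonic across replicas.
        merged = merged.plus(e.getKey(),
                Long.max(merged.getOrDefault(e.getKey(), 0L), e.getValue()));
    }
    return merged;
}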

View File

@@ -0,0 +1,60 @@
package com.usatiuk.dhfs.peersync;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import io.quarkus.scheduler.Scheduled;
import io.smallrye.common.annotation.Blocking;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.eclipse.microprofile.config.inject.ConfigProperty;
@ApplicationScoped
public class PeerLastSeenUpdater {
@Inject
PeerManager peerManager;
@Inject
PeerInfoService peerInfoService;
@Inject
Transaction curTx;
@Inject
TransactionManager txm;
@Inject
RemoteTransaction remoteTransaction;
@ConfigProperty(name = "dhfs.objects.last-seen.timeout")
long lastSeenTimeout;
@Inject
PersistentPeerDataService persistentPeerDataService;
@Scheduled(every = "${dhfs.objects.last-seen.update}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
@Blocking
void update() {
var snapshot = peerManager.getHostStateSnapshot();
for (var a : snapshot.available()) {
txm.run(() -> {
var curInfo = remoteTransaction.getData(PeerInfo.class, a.id()).orElse(null);
if (curInfo == null) return;
var newInfo = curInfo.withLastSeenTimestamp(System.currentTimeMillis());
remoteTransaction.putData(newInfo);
});
}
for (var u : snapshot.unavailable()) {
txm.run(() -> {
if (!persistentPeerDataService.isInitialSyncDone(u))
return;
var curInfo = remoteTransaction.getData(PeerInfo.class, u.id()).orElse(null);
if (curInfo == null) return;
var lastSeen = curInfo.lastSeenTimestamp();
if (System.currentTimeMillis() - lastSeen > (lastSeenTimeout * 1000)) {
var kicked = curInfo.withIncrementedKickCounter(persistentPeerDataService.getSelfUuid());
remoteTransaction.putData(kicked);
}
});
}
}
}

View File

@@ -125,19 +125,23 @@ public class PeerManager {
}
}
public void handleConnectionError(com.usatiuk.dhfs.peersync.PeerInfo host) {
public void handleConnectionError(PeerId host) {
boolean wasReachable = isReachable(host);
if (wasReachable)
Log.infov("Lost connection to {0}", host);
_states.remove(host.id());
_states.remove(host);
for (var l : _disconnectedListeners) {
l.handlePeerDisconnected(host.id());
l.handlePeerDisconnected(host);
}
}
public void handleConnectionError(com.usatiuk.dhfs.peersync.PeerInfo host) {
handleConnectionError(host.id());
}
// FIXME:
private boolean pingCheck(com.usatiuk.dhfs.peersync.PeerInfo host, PeerAddress address) {
try {

View File

@@ -51,6 +51,8 @@ public class PersistentPeerDataService {
PeerInfoService peerInfoService;
@Inject
TransactionManager txm;
@Inject
PeerManager peerManager;
@ConfigProperty(name = "dhfs.peerdiscovery.preset-uuid")
Optional<String> presetUuid;
@@ -89,21 +91,6 @@ public class PersistentPeerDataService {
Files.write(Path.of(stuffRoot, "self_uuid"), _selfUuid.id().toString().getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
}
// private void pushPeerUpdates() {
// pushPeerUpdates(null);
// }
// private void pushPeerUpdates(@Nullable JObject<?> obj) {
// if (obj != null)
// Log.info("Scheduling certificate update after " + obj.getMeta().getName() + " was updated");
// executorService.submit(() -> {
// updateCerts();
// invalidationQueueService.pushInvalidationToAll(PeerDirectory.PeerDirectoryObjName);
// for (var p : peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList()))
// invalidationQueueService.pushInvalidationToAll(PersistentPeerInfo.getNameFromUuid(p));
// });
// }
public PeerId getSelfUuid() {
return _selfUuid;
}
@@ -148,6 +135,7 @@ public class PersistentPeerDataService {
}
curTx.put(data.withInitialSyncDone(data.initialSyncDone().minus(peerId)));
Log.infov("Did reset sync state for {0}", peerId);
curTx.onCommit(() -> peerManager.handleConnectionError(peerId));
return true;
});
}
@@ -160,7 +148,6 @@ public class PersistentPeerDataService {
});
}
public List<IpPeerAddress> getPersistentPeerAddresses() {
return txm.run(() -> {
var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null);

View File

@@ -1,7 +1,6 @@
package com.usatiuk.dhfs.peertrust;
import com.usatiuk.dhfs.invalidation.InvalidationQueueService;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.peersync.PeerInfo;
import com.usatiuk.dhfs.peersync.PeerInfoService;
@@ -43,7 +42,7 @@ public class PeerInfoCertUpdateTxHook implements PreCommitTxHook {
for (var curRef : oldNode.node().children().entrySet()) {
if (!n.node().children().containsKey(curRef.getKey())) {
Log.infov("Will reset sync state for {0}", curRef.getValue());
curTx.onCommit(() -> persistentPeerDataService.resetInitialSyncDone(JKleppmannTreeNodeMetaPeer.nodeIdToPeerId(curRef.getValue())));
persistentPeerDataService.resetInitialSyncDone(JKleppmannTreeNodeMetaPeer.nodeIdToPeerId(curRef.getValue()));
}
}
return;
@@ -54,9 +53,16 @@ public class PeerInfoCertUpdateTxHook implements PreCommitTxHook {
return;
}
if (!(remote.data() instanceof PeerInfo))
if (!(remote.data() instanceof PeerInfo curPi))
return;
var oldPi = (PeerInfo) ((RemoteObjectDataWrapper) old).data();
if (oldPi.kickCounterSum() != curPi.kickCounterSum()) {
Log.warnv("Peer kicked out: {0} to {1}", key, cur);
persistentPeerDataService.resetInitialSyncDone(curPi.id());
}
Log.infov("Changed peer info: {0} to {1}", key, cur);
curTx.onCommit(() -> invalidationQueueService.pushInvalidationToAll(key));

View File

@@ -20,7 +20,7 @@ public class IndexUpdateOpHandler implements OpHandler<IndexUpdateOp> {
@Override
public void handleOp(PeerId from, IndexUpdateOp op) {
txm.run(() -> {
syncHandler.handleRemoteUpdate(from, op.key(), op.changelog(), null);
syncHandler.handleRemoteUpdate(from, op.key(), op.changelog(), op.data());
});
}
}

View File

@@ -0,0 +1,11 @@
package com.usatiuk.dhfs.remoteobj;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface JDataRemotePush {
}
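A hedged usage sketch of the annotation: marking a remote type with @JDataRemotePush makes RemoteObjectMetaOpExtractor bundle its DTO into the IndexUpdateOp, as PeerInfo above does. ExampleDto is hypothetical:

@JDataRemotePush
public record ExampleDto(JObjectKey key) implements JDataRemote, JDataRemoteDto {
    // With the annotation present, invalidations for this type carry the DTO
    // itself, so receivers can apply the update without an extra fetch.
}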

View File

@@ -132,7 +132,7 @@ public class RemoteObjectDeleter {
return true;
}
var knownHosts = peerInfoService.getPeersNoSelf();
var knownHosts = peerInfoService.getSynchronizedPeersNoSelf();
RemoteObjectMeta finalTarget = target;
List<PeerId> missing = knownHosts.stream()
.map(PeerInfo::id)
@@ -187,7 +187,7 @@ public class RemoteObjectDeleter {
if (!obj.seen())
return true;
var knownHosts = peerInfoService.getPeersNoSelf();
var knownHosts = peerInfoService.getSynchronizedPeersNoSelf();
boolean missing = false;
for (var x : knownHosts) {
if (!obj.confirmedDeletes().contains(x.id())) {

View File

@@ -93,6 +93,11 @@ public class RemoteTransaction {
curTx.put(newData);
}
public <T extends JDataRemote> void putDataNew(T obj) {
curTx.putNew(new RemoteObjectMeta(obj, persistentPeerDataService.getSelfUuid()));
curTx.putNew(new RemoteObjectDataWrapper<>(obj));
}
public <T extends JDataRemote> void putData(T obj) {
var curMeta = getMeta(obj.key()).orElse(null);

View File

@@ -23,7 +23,10 @@ import org.pcollections.PMap;
import javax.annotation.Nullable;
import java.lang.reflect.ParameterizedType;
import java.util.*;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Stream;
@ApplicationScoped
@@ -124,52 +127,42 @@ public class SyncHandler {
public void resyncAfterCrash(@Observes @Priority(100000) StartupEvent event) {
if (shutdownChecker.lastShutdownClean())
return;
List<JObjectKey> objs = new LinkedList<>();
txm.run(() -> {
try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
while (it.hasNext()) {
var key = it.peekNextKey();
objs.add(key);
// TODO: Nested transactions
txm.run(() -> {
var proc = curTx.get(JData.class, key).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
if (proc != null) {
proc.handleCrash(key);
}
Log.infov("Handled crash of {0}", key);
}, true);
it.skip();
}
}
});
for (var obj : objs) {
txm.run(() -> {
var proc = curTx.get(JData.class, obj).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
if (proc != null) {
proc.handleCrash(obj);
}
Log.infov("Handled crash of {0}", obj);
});
}
}
public void doInitialSync(PeerId peer) {
List<JObjectKey> objs = new LinkedList<>();
txm.run(() -> {
Log.tracev("Will do initial sync for {0}", peer);
try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
while (it.hasNext()) {
var key = it.peekNextKey();
objs.add(key);
// TODO: Nested transactions
txm.run(() -> {
var proc = curTx.get(JData.class, key).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
if (proc != null) {
proc.prepareForInitialSync(peer, key);
}
Log.infov("Adding to initial sync for peer {0}: {1}", peer, key);
invalidationQueueService.pushInvalidationToOne(peer, key);
}, true);
it.skip();
}
}
});
for (var obj : objs) {
txm.run(() -> {
var proc = curTx.get(JData.class, obj).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
if (proc != null) {
proc.prepareForInitialSync(peer, obj);
}
Log.infov("Adding to initial sync for peer {0}: {1}", peer, obj);
invalidationQueueService.pushInvalidationToOne(peer, obj);
});
}
}
}

View File

@@ -4,93 +4,52 @@ import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;
import java.lang.ref.Cleaner;
import java.lang.ref.WeakReference;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
public class DataLocker {
private static final AutoCloseableNoThrow DUMMY_LOCK = () -> {
};
private final ConcurrentHashMap<Object, LockTag> _locks = new ConcurrentHashMap<>();
private final ConcurrentHashMap<Object, WeakReference<ReentrantLock>> _locks = new ConcurrentHashMap<>();
private static final Cleaner CLEANER = Cleaner.create();
private Lock getTag(Object data) {
var newTag = new ReentrantLock();
var newTagRef = new WeakReference<>(newTag);
while (true) {
var oldTagRef = _locks.putIfAbsent(data, newTagRef);
var oldTag = oldTagRef != null ? oldTagRef.get() : null;
if (oldTag == null && oldTagRef != null) {
_locks.remove(data, oldTagRef);
continue;
}
if (oldTag != null)
return oldTag;
CLEANER.register(newTag, () -> {
_locks.remove(data, newTagRef);
});
return newTag;
}
}
@Nonnull
public AutoCloseableNoThrow lock(Object data) {
while (true) {
var newTag = new LockTag();
var oldTag = _locks.putIfAbsent(data, newTag);
if (oldTag == null) {
return new Lock(data, newTag);
}
try {
synchronized (oldTag) {
while (!oldTag.released) {
if (oldTag.owner == Thread.currentThread()) {
return DUMMY_LOCK;
}
oldTag.wait();
// tag.wait(4000L);
// if (!tag.released) {
// System.out.println("Timeout waiting for lock: " + data);
// System.exit(1);
// throw new InterruptedException();
// }
}
}
} catch (InterruptedException ignored) {
}
}
var lock = getTag(data);
lock.lock();
return lock::unlock;
}
@Nullable
public AutoCloseableNoThrow tryLock(Object data) {
while (true) {
var newTag = new LockTag();
var oldTag = _locks.putIfAbsent(data, newTag);
if (oldTag == null) {
return new Lock(data, newTag);
}
synchronized (oldTag) {
if (!oldTag.released) {
if (oldTag.owner == Thread.currentThread()) {
return DUMMY_LOCK;
}
return null;
}
}
var lock = getTag(data);
if (lock.tryLock()) {
return lock::unlock;
} else {
return null;
}
}
private static class LockTag {
final Thread owner = Thread.currentThread();
// final StackTraceElement[] _creationStack = Thread.currentThread().getStackTrace();
boolean released = false;
}
private class Lock implements AutoCloseableNoThrow {
private static final Cleaner CLEANER = Cleaner.create();
private final Object _key;
private final LockTag _tag;
public Lock(Object key, LockTag tag) {
_key = key;
_tag = tag;
// CLEANER.register(this, () -> {
// if (!tag.released) {
// Log.error("Lock collected without release: " + key);
// }
// });
}
@Override
public void close() {
synchronized (_tag) {
if (_tag.released)
return;
_tag.released = true;
// Notify all because when the object is locked again,
// it's a different lock tag
_tag.notifyAll();
_locks.remove(_key, _tag);
}
}
}
}
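A minimal usage sketch of the reworked DataLocker: lock() now returns an AutoCloseableNoThrow that simply unlocks the per-key ReentrantLock, and the weakly-referenced tags let entries for unused keys fall out of the map via the Cleaner:

var locker = new DataLocker();
try (var ignored = locker.lock("some-key")) {
    // Critical section for "some-key"; the lock is reentrant, so a nested
    // locker.lock("some-key") on the same thread no longer needs a dummy lock.
}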

View File

@@ -34,11 +34,9 @@ public class HashSetDelayedBlockingQueue<T> {
synchronized (this) {
if (_closed) throw new IllegalStateException("Adding to a queue that is closed!");
if (_set.containsKey(el))
if (_set.putIfAbsent(el, new SetElement<>(el, System.currentTimeMillis())) != null)
return false;
_set.put(el, new SetElement<>(el, System.currentTimeMillis()));
this.notify();
return true;
}

View File

@@ -0,0 +1,33 @@
package com.usatiuk.utils;
import java.util.List;
import java.util.function.Function;
public class ListUtils {
public static <T, T_V> List<T_V> prependAndMap(T_V item, List<T> suffix, Function<T, T_V> suffixFn) {
T_V[] arr = (T_V[]) new Object[suffix.size() + 1];
arr[0] = item;
for (int i = 0; i < suffix.size(); i++) {
arr[i + 1] = suffixFn.apply(suffix.get(i));
}
return List.of(arr);
}
public static <T> List<T> prepend(T item, List<T> suffix) {
T[] arr = (T[]) new Object[suffix.size() + 1];
arr[0] = item;
for (int i = 0; i < suffix.size(); i++) {
arr[i + 1] = suffix.get(i);
}
return List.of(arr);
}
public static <T, T_V> List<T_V> map(List<T> suffix, Function<T, T_V> suffixFn) {
T_V[] arr = (T_V[]) new Object[suffix.size()];
for (int i = 0; i < suffix.size(); i++) {
arr[i] = suffixFn.apply(suffix.get(i));
}
return List.of(arr);
}
}
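A usage sketch for ListUtils (values illustrative); each helper fills a single array and wraps it in an immutable List.of, avoiding intermediate stream machinery:

List<String> mapped = ListUtils.prependAndMap("head", List.of(1, 2), i -> "v" + i);
// -> ["head", "v1", "v2"]
List<Integer> prepended = ListUtils.prepend(0, List.of(1, 2));        // -> [0, 1, 2]
List<String> justMapped = ListUtils.map(List.of(1, 2), i -> "v" + i); // -> ["v1", "v2"]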

View File

@@ -0,0 +1,44 @@
package com.usatiuk.utils;
import java.lang.foreign.*;
import java.lang.invoke.MethodHandle;
import java.nio.ByteBuffer;
import java.util.function.Consumer;
public class UninitializedByteBuffer {
private static final Linker LINKER = Linker.nativeLinker();
private static final MethodHandle malloc = LINKER.downcallHandle(
LINKER.defaultLookup().find("malloc").orElseThrow(),
FunctionDescriptor.of(ValueLayout.ADDRESS, ValueLayout.JAVA_LONG)
);
private static final MethodHandle free = LINKER.downcallHandle(
LINKER.defaultLookup().find("free").orElseThrow(),
FunctionDescriptor.ofVoid(ValueLayout.ADDRESS)
);
public static ByteBuffer allocate(int capacity) {
UnsafeAccessor.NIO.reserveMemory(capacity, capacity);
MemorySegment segment = null;
try {
segment = (MemorySegment) malloc.invokeExact((long) capacity);
} catch (Throwable e) {
throw new RuntimeException(e);
}
Consumer<MemorySegment> cleanup = s -> {
try {
free.invokeExact(s);
UnsafeAccessor.NIO.unreserveMemory(capacity, capacity);
} catch (Throwable e) {
throw new RuntimeException(e);
}
};
var reint = segment.reinterpret(capacity, Arena.ofAuto(), cleanup);
return reint.asByteBuffer();
}
public static long getAddress(ByteBuffer buffer) {
return UnsafeAccessor.NIO.getBufferAddress(buffer);
}
}
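A hedged usage sketch: the returned buffer is malloc-backed and deliberately uninitialized, so callers must write before they read; getAddress exposes the native pointer for FFI paths such as the FUSE write_buf change. Running this requires the --enable-preview and --add-exports/--add-opens flags added to the build above:

ByteBuffer buf = UninitializedByteBuffer.allocate(4096);
buf.putLong(0, 42L);                                 // fill before consuming
long addr = UninitializedByteBuffer.getAddress(buf); // native address for JNR/FFI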

View File

@@ -0,0 +1,23 @@
package com.usatiuk.utils;
import jdk.internal.access.JavaNioAccess;
import jdk.internal.access.SharedSecrets;
import sun.misc.Unsafe;
import java.lang.reflect.Field;
public abstract class UnsafeAccessor {
public static final JavaNioAccess NIO;
public static final Unsafe UNSAFE;
static {
try {
NIO = SharedSecrets.getJavaNioAccess();
Field f = Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
UNSAFE = (Unsafe) f.get(null);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}

View File

@@ -25,6 +25,7 @@ echo "Extra options: $EXTRAOPTS_PARSED"
java \
-Xmx512M \
--enable-preview \
-Ddhfs.objects.writeback.limit=134217728 \
-Ddhfs.objects.lru.limit=134217728 \
--add-exports java.base/sun.nio.ch=ALL-UNNAMED \
@@ -37,7 +38,7 @@ java \
-Dquarkus.log.category.\"com.usatiuk\".level=INFO \
-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=INFO \
-Ddhfs.webui.root="$SCRIPT_DIR"/Webui $EXTRAOPTS_PARSED \
-jar "$SCRIPT_DIR"/"DHFS Package"/quarkus-run.jar >quarkus.log 2>&1 &
-jar "$SCRIPT_DIR"/"Server"/quarkus-run.jar >quarkus.log 2>&1 &
echo "Started $!"

run-wrapper/run.ps1 Normal file (48 lines)
View File

@@ -0,0 +1,48 @@
$ErrorActionPreference = 'Stop'
$PIDFILE = Join-Path $PSScriptRoot ".pid"
$EXTRAOPTS = Join-Path $PSScriptRoot "extra-opts"
if (-Not (Test-Path $EXTRAOPTS)) {
New-Item -ItemType File -Path $EXTRAOPTS | Out-Null
}
if (Test-Path $PIDFILE) {
$ReadPID = Get-Content $PIDFILE
if (Get-Process -Id $ReadPID -ErrorAction SilentlyContinue) {
Write-Host "Already running: $ReadPID"
exit 2
}
}
$ExtraOptsParsed = Get-Content $EXTRAOPTS | Where-Object {$_}
Write-Host "Extra options: $($ExtraOptsParsed -join ' ')"
$JAVA_OPTS = @(
"-Xmx512M"
"--enable-preview"
"-Ddhfs.objects.writeback.limit=134217728"
"-Ddhfs.objects.lru.limit=134217728"
"--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED"
"--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED"
"--add-opens=java.base/java.nio=ALL-UNNAMED"
"-Ddhfs.objects.persistence.files.root=$($PSScriptRoot)\..\data\objects"
"-Ddhfs.objects.persistence.stuff.root=$($PSScriptRoot)\..\data\stuff"
"-Ddhfs.objects.persistence.lmdb.size=1000000000"
"-Ddhfs.fuse.root=Z:\"
"-Dquarkus.http.host=0.0.0.0"
'-Dquarkus.log.category.\"com.usatiuk\".level=INFO'
'-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=INFO'
"-Ddhfs.webui.root=$($PSScriptRoot)\Webui"
) + $ExtraOptsParsed + @(
"-jar", "`"$PSScriptRoot\Server\quarkus-run.jar`""
)
$Process = Start-Process -FilePath "java" -ArgumentList $JAVA_OPTS `
-RedirectStandardOutput "$PSScriptRoot\quarkus.log" `
-RedirectStandardError "$PSScriptRoot\quarkus.log.err" `
-NoNewWindow -PassThru
Write-Host "Started $($Process.Id)"
$Process.Id | Out-File -FilePath $PIDFILE

run-wrapper/stop.ps1 Normal file (24 lines)
View File

@@ -0,0 +1,24 @@
$ErrorActionPreference = 'Stop'
$PIDFILE = Join-Path $PSScriptRoot ".pid"
if (-Not (Test-Path $PIDFILE)) {
Write-Host "Not running"
exit 2
}
$ReadPID = Get-Content $PIDFILE
if (-Not (Get-Process -Id $ReadPID -ErrorAction SilentlyContinue)) {
Write-Host "Not running"
Remove-Item $PIDFILE -Force
exit 2
}
Write-Host "Killing $ReadPID"
# TODO: Graceful shutdown
Stop-Process -Id $ReadPID
Remove-Item $PIDFILE -Force

View File

@@ -40,10 +40,10 @@ wget https://nightly.link/usatiuk/dhfs/actions/runs/$LATEST/Run%20wrapper.zip
unzip "Run wrapper.zip"
rm "Run wrapper.zip"
tar xvf "run-wrapper.tar.gz" --strip-components=2
tar xvf "run-wrapper.tar.gz" --strip-components 2
rm "run-wrapper.tar.gz"
rm -rf "DHFS Package"
rm -rf "Server"
rm -rf "Webui"
rm -rf "NativeLibs"

run-wrapper/update.ps1 Normal file (50 lines)
View File

@@ -0,0 +1,50 @@
$ErrorActionPreference = 'Stop'
$PIDFILE = Join-Path $PSScriptRoot ".pid"
$VERSION_FILE = Join-Path $PSScriptRoot "version"
if (Test-Path $PIDFILE) {
$ReadPID = Get-Content $PIDFILE
if (Get-Process -Id $ReadPID -ErrorAction SilentlyContinue) {
Write-Host "Already running: $ReadPID"
exit 2
}
}
$response = Invoke-RestMethod -Uri "https://api.github.com/repos/usatiuk/dhfs/actions/runs?branch=main&status=completed&per_page=1"
$LATEST = $response.workflow_runs[0].id
Write-Host "Latest: $LATEST"
$CUR = (Get-Content $VERSION_FILE -Raw).Trim()
Write-Host "Current: $CUR"
if ([long]$CUR -ge [long]$LATEST) {
Write-Host "Already latest!"
exit 1
}
Write-Host "Downloading..."
Set-Location $PSScriptRoot
$zipFile = "Run wrapper.zip"
$tarFile = "run-wrapper.tar.gz"
$dhfsDir = "dhfs"
Remove-Item $zipFile, $tarFile -Force -ErrorAction SilentlyContinue
Remove-Item $dhfsDir -Recurse -Force -ErrorAction SilentlyContinue
Invoke-WebRequest -Uri "https://nightly.link/usatiuk/dhfs/actions/runs/$LATEST/Run%20wrapper.zip" -OutFile $zipFile
Expand-Archive -LiteralPath $zipFile -DestinationPath $PSScriptRoot
Remove-Item $zipFile -Force
tar -xf $tarFile --strip-components=2
Remove-Item $tarFile -Force
Remove-Item "Server", "Webui", "NativeLibs" -Recurse -Force -ErrorAction SilentlyContinue
Move-Item "$dhfsDir\app\*" . -Force
Remove-Item $dhfsDir -Recurse -Force
# Record the downloaded version so the next run's comparison sees it
$LATEST | Out-File -FilePath $VERSION_FILE
Write-Host "Update complete!"