Mirror of https://github.com/usatiuk/dhfs.git (synced 2025-10-29 04:57:48 +01:00)

Compare commits: b84ef95703...javadoc-pu (27 commits)
| SHA1 |
|---|
| 7902d9e486 |
| 7c056b9674 |
| 8cc040b234 |
| f8375c9cd8 |
| 0c3524851e |
| 3eb7164c0f |
| f544a67fb5 |
| 964b3da951 |
| cb33472dc5 |
| de211bb2d2 |
| 56ab3bad4c |
| 9403556220 |
| 469a6b9011 |
| 52ccbb99bc |
| d972cd1562 |
| 80151bcca5 |
| 289a2b880e |
| 0849df60ae |
| 9cb5c226f9 |
| 87c404828c |
| b074e8eb44 |
| eb5b0ae03c |
| c329c1f982 |
| 4e7b13227b |
| db51d7280c |
| 70fecb389b |
| 6e9a2b25f6 |
.github/workflows/server.yml (vendored, 53 lines changed)
@@ -21,7 +21,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          submodules: 'recursive'
+          submodules: "recursive"

       - name: Install sudo for ACT
         run: apt-get update && apt-get install -y sudo
@@ -32,7 +32,7 @@ jobs:

       - name: User allow other for fuse
         run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf


       - name: Dump fuse.conf
         run: cat /etc/fuse.conf
@@ -43,11 +43,11 @@ jobs:
           distribution: "zulu"
           cache: maven

-      - name: Build LazyFS
-        run: cd thirdparty/lazyfs/ && ./build.sh
+      # - name: Build LazyFS
+      #   run: cd thirdparty/lazyfs/ && ./build.sh

       - name: Test with Maven
-        run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify
+        run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots javadoc:aggregate

       # - name: Build with Maven
       #   run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG
@@ -55,7 +55,12 @@ jobs:
       - uses: actions/upload-artifact@v4
         with:
           name: DHFS Server Package
-          path: dhfs-parent/dhfs-app/target/quarkus-app
+          path: dhfs-parent/dhfs-fuse/target/quarkus-app

+      - uses: actions/upload-artifact@v4
+        with:
+          name: DHFS Javadocs
+          path: dhfs-parent/target/reports/apidocs/
+
       - uses: actions/upload-artifact@v4
         if: ${{ always() }}
@@ -212,7 +217,7 @@ jobs:
         run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app

       - name: Copy DHFS
-        run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"
+        run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"

       - name: Copy Webui
         run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"
@@ -231,3 +236,37 @@ jobs:
         with:
           name: Run wrapper
           path: ~/run-wrapper.tar.gz
+
+  publish-javadoc:
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      pages: write
+      id-token: write
+
+    needs: [build-webui, build-dhfs]
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - uses: actions/download-artifact@v4
+        with:
+          name: DHFS Javadocs
+          path: dhfs-javadocs-downloaded
+
+      - name: Setup Pages
+        uses: actions/configure-pages@v5
+      - name: Upload artifact
+        uses: actions/upload-pages-artifact@v3
+        with:
+          # Upload entire repository
+          path: 'dhfs-javadocs-downloaded'
+      - name: Deploy to GitHub Pages
+        id: deployment
+        uses: actions/deploy-pages@v4
dhfs-parent/.gitignore (vendored, 2 lines changed)

@@ -41,3 +41,5 @@ nb-configuration.xml

 # Plugin directory
 /.quarkus/cli/plugins/
+
+.jqwik-database
@@ -1,8 +1,8 @@
 <component name="ProjectRunConfigurationManager">
   <configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
-    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main" />
-    <module name="dhfs-app" />
-    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
+    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
+    <module name="dhfs-fuse" />
+    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
     <extension name="coverage">
       <pattern>
         <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
@@ -1,8 +1,8 @@
 <component name="ProjectRunConfigurationManager">
   <configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
-    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main" />
-    <module name="dhfs-app" />
-    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
+    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
+    <module name="dhfs-fuse" />
+    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
     <extension name="coverage">
       <pattern>
         <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
@@ -1,5 +0,0 @@
-*
-!target/*-runner
-!target/*-runner.jar
-!target/lib/*
-!target/quarkus-app/*
dhfs-parent/dhfs-app/.gitignore (vendored, 43 lines changed)

@@ -1,43 +0,0 @@
-#Maven
-target/
-pom.xml.tag
-pom.xml.releaseBackup
-pom.xml.versionsBackup
-release.properties
-.flattened-pom.xml
-
-# Eclipse
-.project
-.classpath
-.settings/
-bin/
-
-# IntelliJ
-.idea
-*.ipr
-*.iml
-*.iws
-
-# NetBeans
-nb-configuration.xml
-
-# Visual Studio Code
-.vscode
-.factorypath
-
-# OSX
-.DS_Store
-
-# Vim
-*.swp
-*.swo
-
-# patch
-*.orig
-*.rej
-
-# Local environment
-.env
-
-# Plugin directory
-/.quarkus/cli/plugins/
@@ -1,2 +0,0 @@
-FROM azul/zulu-openjdk-debian:21-jre-latest
-RUN apt update && apt install -y libfuse2 curl
@@ -1,43 +0,0 @@
-version: "3.2"
-
-services:
-  dhfs1:
-    build: .
-    privileged: true
-    devices:
-      - /dev/fuse
-    volumes:
-      - $HOME/dhfs/dhfs1:/dhfs_root
-      - $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
-      - ./target/quarkus-app:/app
-    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
-      -Ddhfs.objects.root=/dhfs_root/d
-      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-      -jar /app/quarkus-run.jar"
-    ports:
-      - 8080:8080
-      - 8081:8443
-      - 5005:5005
-  dhfs2:
-    build: .
-    privileged: true
-    devices:
-      - /dev/fuse
-    volumes:
-      - $HOME/dhfs/dhfs2:/dhfs_root
-      - $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
-      - ./target/quarkus-app:/app
-    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-      --add-exports java.base/jdk.internal.access=ALL-UNNAMED
-      --add-opens=java.base/java.nio=ALL-UNNAMED
-      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
-      -Ddhfs.objects.root=/dhfs_root/d
-      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-      -jar /app/quarkus-run.jar"
-    ports:
-      - 8090:8080
-      - 8091:8443
-      - 5010:5010
@@ -1,172 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>com.usatiuk.dhfs</groupId>
-    <artifactId>dhfs-app</artifactId>
-    <version>1.0-SNAPSHOT</version>
-
-    <parent>
-        <groupId>com.usatiuk.dhfs</groupId>
-        <artifactId>parent</artifactId>
-        <version>1.0-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.junit.jupiter</groupId>
-            <artifactId>junit-jupiter-params</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.testcontainers</groupId>
-            <artifactId>testcontainers</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.awaitility</groupId>
-            <artifactId>awaitility</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.bouncycastle</groupId>
-            <artifactId>bcprov-jdk18on</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.bouncycastle</groupId>
-            <artifactId>bcpkix-jdk18on</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-security</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>net.openhft</groupId>
-            <artifactId>zero-allocation-hashing</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-grpc</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-arc</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-client</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-client-jsonb</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-jsonb</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-scheduler</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-junit5</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.jboss.slf4j</groupId>
-            <artifactId>slf4j-jboss-logmanager</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-collections4</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.pcollections</groupId>
-            <artifactId>pcollections</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-math3</artifactId>
-            <version>3.6.1</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk.dhfs</groupId>
-            <artifactId>dhfs-fuse</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk.dhfs</groupId>
-            <artifactId>utils</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-surefire-plugin</artifactId>
-                <configuration>
-                    <forkCount>1C</forkCount>
-                    <reuseForks>false</reuseForks>
-                    <parallel>classes</parallel>
-                    <systemPropertyVariables>
-                        <junit.jupiter.execution.parallel.enabled>
-                            false
-                        </junit.jupiter.execution.parallel.enabled>
-                    </systemPropertyVariables>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-failsafe-plugin</artifactId>
-                <configuration>
-                    <forkCount>1C</forkCount>
-                    <reuseForks>false</reuseForks>
-                    <parallel>classes</parallel>
-                    <systemPropertyVariables>
-                        <junit.jupiter.execution.parallel.enabled>
-                            false
-                        </junit.jupiter.execution.parallel.enabled>
-                        <junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
-                        <junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
-                    </systemPropertyVariables>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>${quarkus.platform.group-id}</groupId>
-                <artifactId>quarkus-maven-plugin</artifactId>
-                <version>${quarkus.platform.version}</version>
-                <extensions>true</extensions>
-                <executions>
-                    <execution>
-                        <id>quarkus-plugin</id>
-                        <goals>
-                            <goal>build</goal>
-                            <goal>generate-code</goal>
-                            <goal>generate-code-tests</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>
@@ -1,97 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
-#
-# Before building the container image run:
-#
-# ./mvnw package
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-jvm
-#
-# If you want to include the debug port into your docker image
-# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
-# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
-# when running the container
-#
-# Then run the container using :
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-jvm
-#
-# This image uses the `run-java.sh` script to run the application.
-# This scripts computes the command line to execute your Java application, and
-# includes memory/GC tuning.
-# You can configure the behavior using the following environment properties:
-# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
-# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
-#   in JAVA_OPTS (example: "-Dsome.property=foo")
-# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
-#   used to calculate a default maximal heap memory based on a containers restriction.
-#   If used in a container without any memory constraints for the container then this
-#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
-#   of the container available memory as set here. The default is `50` which means 50%
-#   of the available memory is used as an upper boundary. You can skip this mechanism by
-#   setting this value to `0` in which case no `-Xmx` option is added.
-# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
-#   is used to calculate a default initial heap memory based on the maximum heap memory.
-#   If used in a container without any memory constraints for the container then this
-#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
-#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
-#   is used as the initial heap size. You can skip this mechanism by setting this value
-#   to `0` in which case no `-Xms` option is added (example: "25")
-# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
-#   This is used to calculate the maximum value of the initial heap memory. If used in
-#   a container without any memory constraints for the container then this option has
-#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
-#   here. The default is 4096MB which means the calculated value of `-Xms` never will
-#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
-# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
-#   when things are happening. This option, if set to true, will set
-#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
-# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
-#   true").
-# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
-# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
-#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
-# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
-# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
-#   (example: "20")
-# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
-#   (example: "40")
-# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
-#   (example: "4")
-# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
-#   previous GC times. (example: "90")
-# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
-# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
-# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
-#   contain the necessary JRE command-line options to specify the required GC, which
-#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
-# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
-# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
-# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
-#   accessed directly. (example: "foo.example.com,bar.example.com")
-#
-###
-FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
-
-ENV LANGUAGE='en_US:en'
-
-
-# We make four distinct layers so if there are application changes the library layers can be re-used
-COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
-COPY --chown=185 target/quarkus-app/*.jar /deployments/
-COPY --chown=185 target/quarkus-app/app/ /deployments/app/
-COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
-
-EXPOSE 8080
-USER 185
-ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
-ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
-
-ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
@@ -1,93 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
-#
-# Before building the container image run:
-#
-# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
-#
-# If you want to include the debug port into your docker image
-# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
-# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
-# when running the container
-#
-# Then run the container using :
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
-#
-# This image uses the `run-java.sh` script to run the application.
-# This scripts computes the command line to execute your Java application, and
-# includes memory/GC tuning.
-# You can configure the behavior using the following environment properties:
-# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
-# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
-#   in JAVA_OPTS (example: "-Dsome.property=foo")
-# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
-#   used to calculate a default maximal heap memory based on a containers restriction.
-#   If used in a container without any memory constraints for the container then this
-#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
-#   of the container available memory as set here. The default is `50` which means 50%
-#   of the available memory is used as an upper boundary. You can skip this mechanism by
-#   setting this value to `0` in which case no `-Xmx` option is added.
-# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
-#   is used to calculate a default initial heap memory based on the maximum heap memory.
-#   If used in a container without any memory constraints for the container then this
-#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
-#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
-#   is used as the initial heap size. You can skip this mechanism by setting this value
-#   to `0` in which case no `-Xms` option is added (example: "25")
-# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
-#   This is used to calculate the maximum value of the initial heap memory. If used in
-#   a container without any memory constraints for the container then this option has
-#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
-#   here. The default is 4096MB which means the calculated value of `-Xms` never will
-#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
-# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
-#   when things are happening. This option, if set to true, will set
-#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
-# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
-#   true").
-# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
-# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
-#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
-# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
-# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
-#   (example: "20")
-# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
-#   (example: "40")
-# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
-#   (example: "4")
-# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
-#   previous GC times. (example: "90")
-# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
-# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
-# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
-#   contain the necessary JRE command-line options to specify the required GC, which
-#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
-# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
-# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
-# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
-#   accessed directly. (example: "foo.example.com,bar.example.com")
-#
-###
-FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
-
-ENV LANGUAGE='en_US:en'
-
-
-COPY target/lib/* /deployments/lib/
-COPY target/*-runner.jar /deployments/quarkus-run.jar
-
-EXPOSE 8080
-USER 185
-ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
-ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
-
-ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
@@ -1,27 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
-#
-# Before building the container image run:
-#
-# ./mvnw package -Dnative
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server
-#
-###
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
-WORKDIR /work/
-RUN chown 1001 /work \
-    && chmod "g+rwX" /work \
-    && chown 1001:root /work
-COPY --chown=1001:root target/*-runner /work/application
-
-EXPOSE 8080
-USER 1001
-
-ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
@@ -1,30 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
-# It uses a micro base image, tuned for Quarkus native executables.
-# It reduces the size of the resulting container image.
-# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
-#
-# Before building the container image run:
-#
-# ./mvnw package -Dnative
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server
-#
-###
-FROM quay.io/quarkus/quarkus-micro-image:2.0
-WORKDIR /work/
-RUN chown 1001 /work \
-    && chmod "g+rwX" /work \
-    && chown 1001:root /work
-COPY --chown=1001:root target/*-runner /work/application
-
-EXPOSE 8080
-USER 1001
-
-ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
@@ -1,35 +0,0 @@
-quarkus.grpc.server.use-separate-server=false
-dhfs.objects.peerdiscovery.port=42069
-dhfs.objects.peerdiscovery.interval=4s
-dhfs.objects.peerdiscovery.broadcast=true
-dhfs.objects.sync.timeout=30
-dhfs.objects.sync.ping.timeout=5
-dhfs.objects.invalidation.threads=16
-dhfs.objects.invalidation.delay=1000
-dhfs.objects.reconnect_interval=5s
-dhfs.objects.write_log=false
-dhfs.objects.periodic-push-op-interval=5m
-dhfs.fuse.root=${HOME}/dhfs_default/fuse
-dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
-dhfs.fuse.debug=false
-dhfs.fuse.enabled=true
-dhfs.files.allow_recursive_delete=false
-dhfs.files.target_chunk_size=524288
-dhfs.files.max_chunk_size=524288
-dhfs.files.target_chunk_alignment=17
-dhfs.objects.deletion.delay=1000
-dhfs.objects.deletion.can-delete-retry-delay=10000
-dhfs.objects.ref_verification=true
-dhfs.files.use_hash_for_chunks=false
-dhfs.objects.autosync.threads=16
-dhfs.objects.autosync.download-all=false
-dhfs.objects.move-processor.threads=16
-dhfs.objects.ref-processor.threads=16
-dhfs.objects.opsender.batch-size=100
-dhfs.objects.lock_timeout_secs=2
-dhfs.local-discovery=true
-dhfs.peerdiscovery.timeout=10000
-quarkus.log.category."com.usatiuk".min-level=TRACE
-quarkus.log.category."com.usatiuk".level=TRACE
-quarkus.http.insecure-requests=enabled
-quarkus.http.ssl.client-auth=required
@@ -1,29 +0,0 @@
-package com.usatiuk.dhfsapp;
-
-import io.quarkus.test.junit.QuarkusTestProfile;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.HashMap;
-import java.util.Map;
-
-abstract public class TempDataProfile implements QuarkusTestProfile {
-    protected void getConfigOverrides(Map<String, String> toPut) {
-    }
-
-    @Override
-    final public Map<String, String> getConfigOverrides() {
-        Path tempDirWithPrefix;
-        try {
-            tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-        var ret = new HashMap<String, String>();
-        ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
-        ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
-        getConfigOverrides(ret);
-        return ret;
-    }
-}
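The removed TempDataProfile above gave each Quarkus test a fresh temporary data directory, with subclasses layering extra config on top via the protected hook. A minimal usage sketch (the profile name and the overridden property value are illustrative; the property key mirrors the test application.properties removed below):

```java
import java.util.Map;

// Hypothetical subclass of the TempDataProfile shown above: a test class
// opts into it via @TestProfile and gets a fresh temp dir plus overrides.
public class NoDeletionDelayProfile extends TempDataProfile {
    @Override
    protected void getConfigOverrides(Map<String, String> toPut) {
        toPut.put("dhfs.objects.deletion.delay", "0");
    }
}

// On a test class:
// @QuarkusTest
// @TestProfile(NoDeletionDelayProfile.class)
// public class SomeFsTest { ... }
```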
@@ -1,44 +0,0 @@
-package com.usatiuk.dhfsapp;
-
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.ShutdownEvent;
-import io.quarkus.runtime.StartupEvent;
-import jakarta.annotation.Priority;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.enterprise.event.Observes;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.util.Objects;
-
-@ApplicationScoped
-public class TestDataCleaner {
-    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
-    String tempDirectory;
-
-    public static void purgeDirectory(File dir) {
-        try {
-            for (File file : Objects.requireNonNull(dir.listFiles())) {
-                if (file.isDirectory())
-                    purgeDirectory(file);
-                file.delete();
-            }
-        } catch (Exception e) {
-            Log.error("Couldn't purge directory " + dir, e);
-        }
-    }
-
-    void init(@Observes @Priority(1) StartupEvent event) throws IOException {
-        try {
-            purgeDirectory(Path.of(tempDirectory).toFile());
-        } catch (Exception ignored) {
-            Log.warn("Couldn't cleanup test data on init");
-        }
-    }
-
-    void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
-        purgeDirectory(Path.of(tempDirectory).toFile());
-    }
-}
@@ -1,11 +0,0 @@
-dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
-dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
-dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
-dhfs.objects.ref_verification=true
-dhfs.objects.deletion.delay=0
-quarkus.log.category."com.usatiuk.dhfs".level=TRACE
-quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
-quarkus.http.test-port=0
-quarkus.http.test-ssl-port=0
-dhfs.local-discovery=false
-dhfs.objects.persistence.snapshot-extra-checks=true
@@ -5,6 +5,11 @@ import com.usatiuk.dhfs.remoteobj.JDataRemote;
 import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
 import com.usatiuk.objects.JObjectKey;

+/**
+ * ChunkData is a data structure that represents an immutable binary blob
+ * @param key unique key
+ * @param data binary data
+ */
 public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
     @Override
     public int estimateSize() {
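A quick sketch of constructing the record documented above (assumes only the protobuf ByteString API and the JObjectKey factory visible elsewhere in this diff):

```java
import com.google.protobuf.ByteString;
import java.util.UUID;

// Sketch: a ChunkData keyed by a random UUID and holding an immutable
// blob — the same shape createChunk() builds further down in this diff.
var chunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()),
        ByteString.copyFromUtf8("hello"));
assert chunk.data().size() == 5;
```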
@@ -1,26 +0,0 @@
-package com.usatiuk.dhfsfs.objects;
-
-import com.usatiuk.dhfs.ProtoSerializer;
-import com.usatiuk.dhfs.persistence.ChunkDataP;
-import com.usatiuk.dhfs.persistence.JObjectKeyP;
-import com.usatiuk.objects.JObjectKey;
-import jakarta.inject.Singleton;
-
-@Singleton
-public class ChunkDataProtoSerializer implements ProtoSerializer<ChunkDataP, ChunkData> {
-    @Override
-    public ChunkData deserialize(ChunkDataP message) {
-        return new ChunkData(
-                JObjectKey.of(message.getKey().getName()),
-                message.getData()
-        );
-    }
-
-    @Override
-    public ChunkDataP serialize(ChunkData object) {
-        return ChunkDataP.newBuilder()
-                .setKey(JObjectKeyP.newBuilder().setName(object.key().value()).build())
-                .setData(object.data())
-                .build();
-    }
-}
@@ -9,6 +9,14 @@ import com.usatiuk.objects.JObjectKey;
 import java.util.Collection;
 import java.util.Set;

+/**
+ * File is a data structure that represents a file in the file system
+ * @param key unique key
+ * @param mode file mode
+ * @param cTime creation time
+ * @param mTime modification time
+ * @param symlink true if the file is a symlink, false otherwise
+ */
 public record File(JObjectKey key, long mode, long cTime, long mTime,
                    boolean symlink
 ) implements JDataRemote, JMapHolder<JMapLongKey> {
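For illustration, the record above is constructed with exactly this shape by DhfsFileService.create() later in this diff; a sketch:

```java
import java.util.UUID;

// Sketch: a regular (non-symlink) file with mode 0644 and both timestamps
// set to the current time, as DhfsFileService.create() does below.
long now = System.currentTimeMillis();
File f = new File(JObjectKey.of(UUID.randomUUID().toString()), 0644, now, now, false);
```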
@@ -7,6 +7,11 @@ import org.apache.commons.lang3.tuple.Pair;

 import java.util.List;

+/**
+ * FileDto is a data transfer object that contains a file and its chunks.
+ * @param file the file
+ * @param chunks the list of chunks, each represented as a pair of a long and a JObjectKey
+ */
 public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
     @Override
     public Class<? extends JDataRemote> objClass() {
@@ -5,6 +5,9 @@ import com.usatiuk.dhfs.syncmap.DtoMapper;
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.inject.Inject;

+/**
+ * Maps a {@link File} object to a {@link FileDto} object and vice versa.
+ */
 @ApplicationScoped
 public class FileDtoMapper implements DtoMapper<File, FileDto> {
     @Inject
@@ -10,11 +10,20 @@ import org.apache.commons.lang3.tuple.Pair;
 import java.util.ArrayList;
 import java.util.List;

+/**
+ * Helper class for working with files.
+ */
 @ApplicationScoped
 public class FileHelper {
     @Inject
     JMapHelper jMapHelper;

+    /**
+     * Get the chunks of a file.
+     * Transaction is expected to be already started.
+     * @param file the file to get chunks from
+     * @return a list of pairs of chunk offset and chunk key
+     */
     public List<Pair<Long, JObjectKey>> getChunks(File file) {
         ArrayList<Pair<Long, JObjectKey>> chunks = new ArrayList<>();
         try (var it = jMapHelper.getIterator(file)) {
@@ -26,6 +35,13 @@ public class FileHelper {
         return List.copyOf(chunks);
     }

+    /**
+     * Replace the chunks of a file.
+     * All previous chunks will be deleted.
+     * Transaction is expected to be already started.
+     * @param file the file to replace chunks in
+     * @param chunks the list of pairs of chunk offset and chunk key
+     */
     public void replaceChunks(File file, List<Pair<Long, JObjectKey>> chunks) {
         jMapHelper.deleteAll(file);
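A usage sketch for the two documented methods (assumes an injected FileHelper and an already-started transaction, per the javadoc; the chunk key is hypothetical):

```java
// Read the current chunk list of a file, then re-point the file at a
// single chunk starting at offset 0. replaceChunks() first deletes all
// previous entries, per the javadoc above.
List<Pair<Long, JObjectKey>> chunks = fileHelper.getChunks(file);
fileHelper.replaceChunks(file, List.of(Pair.of(0L, JObjectKey.of("someChunkKey"))));
```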
@@ -1,25 +0,0 @@
-package com.usatiuk.dhfsfs.objects;
-
-import com.usatiuk.dhfs.ProtoSerializer;
-import com.usatiuk.dhfs.persistence.FileDtoP;
-import com.usatiuk.utils.SerializationHelper;
-import jakarta.inject.Singleton;
-
-import java.io.IOException;
-
-@Singleton
-public class FileProtoSerializer implements ProtoSerializer<FileDtoP, FileDto> {
-    @Override
-    public FileDto deserialize(FileDtoP message) {
-        try (var is = message.getSerializedData().newInput()) {
-            return SerializationHelper.deserialize(is);
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public FileDtoP serialize(FileDto object) {
-        return FileDtoP.newBuilder().setSerializedData(SerializationHelper.serialize(object)).build();
-    }
-}
@@ -8,7 +8,6 @@ import com.usatiuk.dhfs.remoteobj.*;
 import com.usatiuk.dhfsfs.service.DhfsFileService;
 import com.usatiuk.kleppmanntree.AlreadyExistsException;
 import com.usatiuk.objects.JObjectKey;
-import com.usatiuk.objects.transaction.LockingStrategy;
 import com.usatiuk.objects.transaction.Transaction;
 import io.grpc.Status;
 import io.grpc.StatusRuntimeException;
@@ -24,6 +23,9 @@ import javax.annotation.Nullable;
 import java.util.List;
 import java.util.Objects;

+/**
+ * Handles synchronization of file objects.
+ */
 @ApplicationScoped
 public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
     @Inject
@@ -42,14 +44,18 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
     @Inject
     DhfsFileService fileService;

-    private JKleppmannTreeManager.JKleppmannTree getTreeW() {
+    private JKleppmannTreeManager.JKleppmannTree getTree() {
         return jKleppmannTreeManager.getTree(JObjectKey.of("fs")).orElseThrow();
     }

-    private JKleppmannTreeManager.JKleppmannTree getTreeR() {
-        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC).orElseThrow();
-    }
-
+    /**
+     * Resolve conflict between two file versions, update the file in storage and create a conflict file.
+     *
+     * @param from the peer that sent the update
+     * @param key the key of the file
+     * @param receivedChangelog the changelog of the received file
+     * @param receivedData the received file data
+     */
     private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
                                  @Nullable FileDto receivedData) {
         var oursCurMeta = curTx.get(RemoteObjectMeta.class, key).orElse(null);
@@ -131,12 +137,12 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {

         do {
             try {
-                getTreeW().move(parent.getRight(),
+                getTree().move(parent.getRight(),
                         new JKleppmannTreeNodeMetaFile(
                                 parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + otherHostname.toString() + "." + i,
                                 newFile.key()
                         ),
-                        getTreeW().getNewNodeId()
+                        getTree().getNewNodeId()
                 );
             } catch (AlreadyExistsException aex) {
                 i++;
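The retry loop above encodes conflict copies in the file name itself; a sketch of the scheme (a helper written for illustration only — origName corresponds to parent.getLeft() in the diff):

```java
// Sketch of the conflict-copy naming used by the move() call above.
// The counter i is bumped on AlreadyExistsException until the move succeeds.
static String conflictName(String origName, String selfUuid, String otherPeer, int i) {
    return origName + ".fconflict." + selfUuid + "." + otherPeer + "." + i;
}
```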
@@ -6,6 +6,10 @@ import com.usatiuk.objects.JObjectKey;
 import java.util.Collection;
 import java.util.List;

+/**
+ * JKleppmannTreeNodeMetaDirectory is a record that represents a directory in the JKleppmann tree.
+ * @param name the name of the directory
+ */
 public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
     public JKleppmannTreeNodeMeta withName(String name) {
         return new JKleppmannTreeNodeMetaDirectory(name);
@@ -6,6 +6,11 @@ import com.usatiuk.objects.JObjectKey;
 import java.util.Collection;
 import java.util.List;

+/**
+ * JKleppmannTreeNodeMetaFile is a record that represents a file in the JKleppmann tree.
+ * @param name the name of the file
+ * @param fileIno a reference to the `File` object
+ */
 public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
     @Override
     public JKleppmannTreeNodeMeta withName(String name) {
@@ -2,47 +2,762 @@ package com.usatiuk.dhfsfs.service;

 import com.google.protobuf.ByteString;
 import com.google.protobuf.UnsafeByteOperations;
+import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
+import com.usatiuk.dhfs.jmap.JMapEntry;
+import com.usatiuk.dhfs.jmap.JMapHelper;
+import com.usatiuk.dhfs.jmap.JMapLongKey;
+import com.usatiuk.dhfs.remoteobj.JDataRemote;
+import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
+import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
+import com.usatiuk.dhfsfs.objects.ChunkData;
+import com.usatiuk.dhfsfs.objects.File;
+import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
+import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
+import com.usatiuk.objects.JData;
 import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.objects.iterators.IteratorStart;
+import com.usatiuk.objects.transaction.Transaction;
+import com.usatiuk.objects.transaction.TransactionManager;
+import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import io.quarkus.logging.Log;
+import io.quarkus.runtime.StartupEvent;
+import jakarta.annotation.Priority;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.enterprise.event.Observes;
+import jakarta.inject.Inject;
 import org.apache.commons.lang3.tuple.Pair;
+import org.eclipse.microprofile.config.inject.ConfigProperty;

 import java.util.Optional;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.stream.StreamSupport;

-public interface DhfsFileService {
-    Optional<JObjectKey> open(String name);
+/**
+ * Actual filesystem implementation.
+ */
+@ApplicationScoped
+public class DhfsFileService {
+    @ConfigProperty(name = "dhfs.files.target_chunk_alignment")
+    int targetChunkAlignment;
+    @ConfigProperty(name = "dhfs.files.target_chunk_size")
+    int targetChunkSize;
+    @ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
+    int maxChunkSize;
+    @ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
+    boolean useHashForChunks;
+    @ConfigProperty(name = "dhfs.files.allow_recursive_delete")
+    boolean allowRecursiveDelete;
+    @ConfigProperty(name = "dhfs.objects.ref_verification")
+    boolean refVerification;
+    @ConfigProperty(name = "dhfs.objects.write_log")
+    boolean writeLogging;

-    Optional<JObjectKey> create(String name, long mode);
+    @Inject
+    Transaction curTx;
+    @Inject
+    RemoteTransaction remoteTx;
+    @Inject
+    TransactionManager jObjectTxManager;
+    @Inject
+    JKleppmannTreeManager jKleppmannTreeManager;
+    @Inject
+    JMapHelper jMapHelper;

-    Pair<String, JObjectKey> inoToParent(JObjectKey ino);
-
-    void mkdir(String name, long mode);
-
-    Optional<GetattrRes> getattr(JObjectKey name);
-
-    Boolean chmod(JObjectKey name, long mode);
-
-    void unlink(String name);
-
-    Boolean rename(String from, String to);
-
-    Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs);
-
-    Iterable<String> readDir(String name);
-
-    long size(JObjectKey fileUuid);
-
-    ByteString read(JObjectKey fileUuid, long offset, int length);
-
-    Long write(JObjectKey fileUuid, long offset, ByteString data);
-
-    default Long write(JObjectKey fileUuid, long offset, byte[] data) {
-        return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
+    private JKleppmannTreeManager.JKleppmannTree getTree() {
+        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
     }

-    Boolean truncate(JObjectKey fileUuid, long length);
+    /**
+     * Create a new chunk with the given data and a new unique ID.
+     *
+     * @param bytes the data to store in the chunk
+     * @return the created chunk
+     */
+    private ChunkData createChunk(ByteString bytes) {
+        var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
+        remoteTx.putDataNew(newChunk);
+        return newChunk;
+    }

-    String readlink(JObjectKey uuid);
+    void init(@Observes @Priority(500) StartupEvent event) {
+        Log.info("Initializing file service");
+        getTree();
+    }

-    ByteString readlinkBS(JObjectKey uuid);
+    private JKleppmannTreeNode getDirEntry(String name) {
+        var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
+        if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
+        var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
+        return ret;
+    }

-    JObjectKey symlink(String oldpath, String newpath);
+    private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
+        var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
+        if (res == null) return Optional.empty();
+        var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
+        return ret;
+    }
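getDirEntry() above splits a path into name components by streaming java.nio.file.Path's spliterator; a standalone illustration of that idiom:

```java
import java.nio.file.Path;
import java.util.List;
import java.util.stream.StreamSupport;

// Path implements Iterable<Path>, so streaming its spliterator yields one
// element per name component; traverse() then walks these in the tree.
List<String> parts = StreamSupport.stream(Path.of("/a/b/c.txt").spliterator(), false)
        .map(Path::toString)
        .toList();
// parts == [a, b, c.txt] — the root component is not included.
```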
/**
|
||||
* Get the attributes of a file or directory.
|
||||
* @param uuid the UUID of the file or directory
|
||||
* @return the attributes of the file or directory
|
||||
*/
|
||||
public Optional<GetattrRes> getattr(JObjectKey uuid) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
var ref = curTx.get(JData.class, uuid).orElse(null);
|
||||
if (ref == null) return Optional.empty();
|
||||
GetattrRes ret;
|
||||
if (ref instanceof RemoteObjectMeta r) {
|
||||
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
|
||||
if (remote instanceof File f) {
|
||||
ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? GetattrType.SYMLINK : GetattrType.FILE);
|
||||
} else {
|
||||
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
|
||||
}
|
||||
} else if (ref instanceof JKleppmannTreeNodeHolder) {
|
||||
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
|
||||
} else {
|
||||
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
|
||||
}
|
||||
return Optional.of(ret);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to resolve a path to a file or directory.
|
||||
* @param name the path to resolve
|
||||
* @return the key of the file or directory, or an empty optional if it does not exist
|
||||
*/
|
||||
public Optional<JObjectKey> open(String name) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
try {
|
||||
var ret = getDirEntry(name);
|
||||
return switch (ret.meta()) {
|
||||
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
|
||||
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
|
||||
default -> Optional.empty();
|
||||
};
|
||||
} catch (StatusRuntimeException e) {
|
||||
if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
|
||||
return Optional.empty();
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private void ensureDir(JKleppmannTreeNode entry) {
|
||||
if (!(entry.meta() instanceof JKleppmannTreeNodeMetaDirectory))
|
||||
throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new file with the given name and mode.
|
||||
* @param name the name of the file
|
||||
* @param mode the mode of the file
|
||||
* @return the key of the created file
|
||||
*/
|
||||
public Optional<JObjectKey> create(String name, long mode) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
Path path = Path.of(name);
|
||||
var parent = getDirEntry(path.getParent().toString());
|
||||
|
||||
ensureDir(parent);
|
||||
|
||||
String fname = path.getFileName().toString();
|
||||
|
||||
var fuuid = UUID.randomUUID();
|
||||
Log.debug("Creating file " + fuuid);
|
||||
File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false);
|
||||
remoteTx.putData(f);
|
||||
|
||||
try {
|
||||
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
|
||||
} catch (Exception e) {
|
||||
// fobj.getMeta().removeRef(newNodeId);
|
||||
throw e;
|
||||
}
|
||||
return Optional.of(f.key());
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the parent directory of a file or directory.
|
||||
* @param ino the key of the file or directory
|
||||
* @return the parent directory
|
||||
*/
|
||||
public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
// FIXME: Slow
|
||||
return getTree().findParent(w -> {
|
||||
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
|
||||
return f.fileIno().equals(ino);
|
||||
return false;
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new directory with the given name and mode.
|
||||
* @param name the name of the directory
|
||||
* @param mode the mode of the directory
|
||||
*/
|
||||
public void mkdir(String name, long mode) {
|
||||
jObjectTxManager.executeTx(() -> {
|
||||
Path path = Path.of(name);
|
||||
var parent = getDirEntry(path.getParent().toString());
|
||||
ensureDir(parent);
|
||||
|
||||
String dname = path.getFileName().toString();
|
||||
|
||||
Log.debug("Creating directory " + name);
|
||||
|
||||
// TODO: No modes for directories yet
|
||||
getTree().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId());
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Unlink a file or directory.
|
||||
* @param name the name of the file or directory
|
||||
* @throws DirectoryNotEmptyException if the directory is not empty and recursive delete is not allowed
|
||||
*/
|
||||
public void unlink(String name) {
|
||||
jObjectTxManager.executeTx(() -> {
|
||||
var node = getDirEntryOpt(name).orElse(null);
|
||||
if (node == null)
|
||||
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
|
||||
if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
|
||||
if (!allowRecursiveDelete && !node.children().isEmpty())
|
||||
throw new DirectoryNotEmptyException();
|
||||
}
|
||||
getTree().trash(node.meta(), node.key());
|
||||
});
|
||||
}
/**
 * Rename a file or directory.
 * @param from the old name
 * @param to the new name
 * @return true if the rename was successful, false otherwise
 */
public Boolean rename(String from, String to) {
return jObjectTxManager.executeTx(() -> {
var node = getDirEntry(from);
JKleppmannTreeNodeMeta meta = node.meta();

var toPath = Path.of(to);
var toDentry = getDirEntry(toPath.getParent().toString());
ensureDir(toDentry);

getTree().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
return true;
});
}

/**
 * Change the mode of a file or directory.
 * @param uuid the ID of the file or directory
 * @param mode the new mode
 * @return true if the mode was changed successfully, false otherwise
 */
public Boolean chmod(JObjectKey uuid, long mode) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withMode(mode).withCurrentMTime());
return true;
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
});
}

/**
 * Read the contents of a directory.
 * @param name the path of the directory
 * @return an iterable of the names of the files in the directory
 */
public Iterable<String> readDir(String name) {
return jObjectTxManager.executeTx(() -> {
var found = getDirEntry(name);

if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
throw new StatusRuntimeException(Status.INVALID_ARGUMENT);

return found.children().keySet();
});
}
/**
 * Read the contents of a file.
 * @param fileUuid the ID of the file
 * @param offset the offset to start reading from
 * @param length the number of bytes to read
 * @return the contents of the file as a ByteString
 */
public ByteString read(JObjectKey fileUuid, long offset, int length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should not be negative: " + length));
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should not be negative: " + offset));

var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
}

try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
if (!it.hasNext())
return ByteString.empty();

// if (it.peekNextKey().key() != offset) {
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
// return Optional.of(ByteString.empty());
// }
long curPos = offset;
ByteString buf = ByteString.empty();

var chunk = it.next();

while (curPos < offset + length) {
var chunkPos = chunk.getKey().key();

long offInChunk = curPos - chunkPos;

long toReadInChunk = (offset + length) - curPos;

var chunkBytes = readChunk(chunk.getValue().ref());

long readableLen = chunkBytes.size() - offInChunk;

var toReadReally = Math.min(readableLen, toReadInChunk);

if (toReadReally < 0) break;

buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));

curPos += toReadReally;

if (readableLen > toReadInChunk)
break;

if (!it.hasNext()) break;

chunk = it.next();
}

return buf;
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
}
});
}
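// Illustrative sketch only, not part of the original API: reading a whole
// file by looping over read(); the helper name and the 64 KiB step size are
// assumptions made for this example.
private ByteString readFully(JObjectKey fileUuid) {
    ByteString result = ByteString.empty();
    long pos = 0;
    long total = size(fileUuid);
    while (pos < total) {
        // read() stops at the last existing chunk, so advance by the number
        // of bytes actually returned rather than by the requested length
        var part = read(fileUuid, pos, (int) Math.min(65536, total - pos));
        if (part.isEmpty()) break; // defensive: avoid spinning on a zero-length read
        result = result.concat(part);
        pos += part.size();
    }
    return result;
}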
/**
 * Read the contents of a chunk.
 * @param uuid the ID of the chunk
 * @return the contents of the chunk as a ByteString
 */
private ByteString readChunk(JObjectKey uuid) {
var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);

if (chunkRead == null) {
Log.error("Chunk requested not found: " + uuid);
throw new StatusRuntimeException(Status.NOT_FOUND);
}

return chunkRead.data();
}
/**
 * Get the size of a chunk.
 * @param uuid the ID of the chunk
 * @return the size of the chunk
 */
private int getChunkSize(JObjectKey uuid) {
return readChunk(uuid).size();
}
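/**
 * Align a number down to a multiple of 2^n by clearing its low n bits.
 * For example, alignDown(300_000, 17) == 262_144 (that is, 2 * 2^17).
 * @param num the number to align
 * @param n the power of two to align to
 * @return the aligned value
 */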
private long alignDown(long num, long n) {
return num & -(1L << n);
}
/**
 * Write data to a file.
 * @param fileUuid the ID of the file
 * @param offset the offset to write to
 * @param data the data to write
 * @return the number of bytes written
 */
public Long write(JObjectKey fileUuid, long offset, ByteString data) {
return jObjectTxManager.executeTx(() -> {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should not be negative: " + offset));

var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
}

Map<Long, JObjectKey> removedChunks = new HashMap<>();

long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
long writeEnd = offset + data.size();
long start = realOffset;
long existingEnd = 0;
ByteString pendingPrefix = ByteString.empty();
ByteString pendingSuffix = ByteString.empty();

try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
while (it.hasNext()) {
var curEntry = it.next();
long curChunkStart = curEntry.getKey().key();
var curChunkId = curEntry.getValue().ref();
long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
existingEnd = curChunkEnd;
if (curChunkEnd <= realOffset) break;

removedChunks.put(curEntry.getKey().key(), curChunkId);

if (curChunkStart < offset) {
if (curChunkStart < start)
start = curChunkStart;

var readChunk = readChunk(curChunkId);
pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
}

if (curChunkEnd > writeEnd) {
var readChunk = readChunk(curChunkId);
pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
}

if (curChunkEnd >= writeEnd) break;
}
}


Map<Long, JObjectKey> newChunks = new HashMap<>();

if (existingEnd < offset) {
if (!pendingPrefix.isEmpty()) {
int diff = Math.toIntExact(offset - existingEnd);
pendingPrefix = pendingPrefix.concat(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(diff)));
} else {
fillZeros(existingEnd, offset, newChunks);
start = offset;
}
}

ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);

int combinedSize = pendingWrites.size();

{
int cur = 0;
while (cur < combinedSize) {
int end;

if (combinedSize - cur < maxChunkSize)
end = combinedSize;
else if (targetChunkAlignment < 0)
end = combinedSize;
else
end = Math.min(cur + targetChunkSize, combinedSize);

var thisChunk = pendingWrites.substring(cur, end);

ChunkData newChunkData = createChunk(thisChunk);
newChunks.put(start, newChunkData.key());

start += thisChunk.size();
cur = end;
}
}

for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}

for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}

remoteTx.putData(file.withCurrentMTime());

return (long) data.size();
});
}
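// Worked example with assumed values (targetChunkAlignment = 17, i.e. 128 KiB
// chunks): a 10_000-byte write at offset 300_000 is widened to
// realOffset = alignDown(300_000, 17) = 262_144; the old bytes in
// [262_144, 300_000) become pendingPrefix, old bytes past writeEnd = 310_000
// in the overlapped chunks become pendingSuffix, the overlapped chunks are
// removed, and prefix + data + suffix is re-split into chunks of at most
// maxChunkSize.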
/**
 * Truncate a file to the given length.
 * @param fileUuid the ID of the file
 * @param length the new length of the file
 * @return true if the truncate was successful, false otherwise
 */
public Boolean truncate(JObjectKey fileUuid, long length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should not be negative: " + length));

var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to truncate: " + fileUuid);
return false;
}

if (length == 0) {
jMapHelper.deleteAll(file);
remoteTx.putData(file);
return true;
}

var curSize = size(fileUuid);
if (curSize == length) return true;

NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();

if (curSize < length) {
fillZeros(curSize, length, newChunks);
} else {
// Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(length))) {
last = it.hasNext() ? it.next() : null;
while (it.hasNext()) {
var next = it.next();
removedChunks.put(next.getKey().key(), next.getValue().ref());
}
}
removedChunks.put(last.getKey().key(), last.getValue().ref());
//
// NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
//
// long start = 0;
//
// try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
// first = it.hasNext() ? it.next() : null;
// boolean empty = last == null;
// if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
// first = null;
// last = null;
// start = offset;
// } else if (!empty) {
// assert first != null;
// removedChunks.put(first.getKey().key(), first.getValue().ref());
// while (it.hasNext() && it.peekNextKey() != last.getKey()) {
// var next = it.next();
// removedChunks.put(next.getKey().key(), next.getValue().ref());
// }
// removedChunks.put(last.getKey().key(), last.getValue().ref());
// }
// }
//
// var tail = chunksAll.lowerEntry(length);
// var afterTail = chunksAll.tailMap(tail.getKey(), false);
//
// removedChunks.put(tail.getKey(), tail.getValue());
// removedChunks.putAll(afterTail);

var tailBytes = readChunk(last.getValue().ref());
var newChunk = tailBytes.substring(0, (int) (length - last.getKey().key()));

ChunkData newChunkData = createChunk(newChunk);
newChunks.put(last.getKey().key(), newChunkData.key());
}

// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());

for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}

for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}

remoteTx.putData(file.withCurrentMTime());
return true;
});
}
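// Worked example with an assumed chunk layout: for chunks at offsets 0 and
// 131_072, truncate(file, 200_000) keeps chunk 0 untouched and replaces the
// tail chunk with its first 200_000 - 131_072 = 68_928 bytes; truncating to
// a size larger than the current one instead pads the gap with zero chunks
// via fillZeros().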
/**
 * Fill the given range with zeroes.
 * @param fillStart the start offset of the range
 * @param length the end offset of the range (exclusive)
 * @param newChunks the map to store the new chunks in
 */
private void fillZeros(long fillStart, long length, Map<Long, JObjectKey> newChunks) {
long combinedSize = (length - fillStart);

long start = fillStart;

// Hack
HashMap<Long, ChunkData> zeroCache = new HashMap<>();

{
long cur = 0;
while (cur < combinedSize) {
long end;

if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
end = cur + targetChunkSize;
} else {
end = combinedSize;
}
}

if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(Math.toIntExact(end - cur)))));

ChunkData newChunkData = zeroCache.get(end - cur);
newChunks.put(start, newChunkData.key());

start += newChunkData.data().size();
cur = end;
}
}
}
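// Worked example with an assumed targetChunkSize of 131_072: filling a
// 300_000-byte hole emits a 131_072-byte zero chunk followed by a
// 168_928-byte one, since the remainder after the first chunk is below
// 1.5 * targetChunkSize; identical zero chunks are reused through zeroCache
// within a single call.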
/**
 * Read the contents of a symlink.
 * @param uuid the ID of the symlink
 * @return the contents of the symlink as a string
 */
public String readlink(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
return readlinkBS(uuid).toStringUtf8();
});
}

/**
 * Read the contents of a symlink as a ByteString.
 * @param uuid the ID of the symlink
 * @return the contents of the symlink as a ByteString
 */
public ByteString readlinkBS(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid)));
});
}

/**
 * Create a symlink.
 * @param oldpath the target of the symlink
 * @param newpath the path of the symlink
 * @return the key of the created symlink
 */
public JObjectKey symlink(String oldpath, String newpath) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(newpath);
var parent = getDirEntry(path.getParent().toString());

ensureDir(parent);

String fname = path.getFileName().toString();

var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);

ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true);
jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());

remoteTx.putData(f);
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
return f.key();
});
}
/**
 * Set the access and modification times of a file.
 * @param fileUuid the ID of the file
 * @param atimeMs the access time in milliseconds
 * @param mtimeMs the modification time in milliseconds
 * @return true if the times were set successfully, false otherwise
 */
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

// FIXME:
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
return true;
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
});
}

/**
 * Get the size of a file.
 * @param fileUuid the ID of the file
 * @return the size of the file
 */
public long size(JObjectKey fileUuid) {
return jObjectTxManager.executeTx(() -> {
long realSize = 0;
var file = remoteTx.getData(File.class, fileUuid)
.orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));

Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) {
last = it.hasNext() ? it.next() : null;
}

if (last != null) {
realSize = last.getKey().key() + getChunkSize(last.getValue().ref());
}

return realSize;
});
}
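// Illustrative note (chunk layout assumed): size() inspects only the last
// map entry, so with a final 4_096-byte chunk at offset 262_144 it returns
// 262_144 + 4_096 = 266_240 without reading any earlier chunk.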
/**
 * Write data to a file.
 * @param fileUuid the ID of the file
 * @param offset the offset to write to
 * @param data the data to write
 * @return the number of bytes written
 */
public Long write(JObjectKey fileUuid, long offset, byte[] data) {
return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
}
}
@@ -1,665 +0,0 @@
package com.usatiuk.dhfsfs.service;

import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.ChunkData;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.*;
import java.util.stream.StreamSupport;

@ApplicationScoped
public class DhfsFileServiceImpl implements DhfsFileService {
@Inject
Transaction curTx;
@Inject
RemoteTransaction remoteTx;
@Inject
TransactionManager jObjectTxManager;

@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
int targetChunkAlignment;

@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;

@ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
int maxChunkSize;

@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
boolean useHashForChunks;

@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
boolean allowRecursiveDelete;

@ConfigProperty(name = "dhfs.objects.ref_verification")
boolean refVerification;

@ConfigProperty(name = "dhfs.objects.write_log")
boolean writeLogging;

@Inject
JKleppmannTreeManager jKleppmannTreeManager;

@Inject
JMapHelper jMapHelper;

private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
}

private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC, () -> new JKleppmannTreeNodeMetaDirectory(""));
}
private ChunkData createChunk(ByteString bytes) {
var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
remoteTx.putDataNew(newChunk);
return newChunk;
}

void init(@Observes @Priority(500) StartupEvent event) {
Log.info("Initializing file service");
getTreeW();
}

private JKleppmannTreeNode getDirEntryW(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}

private JKleppmannTreeNode getDirEntryR(String name) {
var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}

private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) return Optional.empty();
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
return ret;
}

@Override
public Optional<GetattrRes> getattr(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var ref = curTx.get(JData.class, uuid).orElse(null);
if (ref == null) return Optional.empty();
GetattrRes ret;
if (ref instanceof RemoteObjectMeta r) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? GetattrType.SYMLINK : GetattrType.FILE);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
}
} else if (ref instanceof JKleppmannTreeNodeHolder) {
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
}
return Optional.of(ret);
});
}

@Override
public Optional<JObjectKey> open(String name) {
return jObjectTxManager.executeTx(() -> {
try {
var ret = getDirEntryR(name);
return switch (ret.meta()) {
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
default -> Optional.empty();
};
} catch (StatusRuntimeException e) {
if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
return Optional.empty();
}
throw e;
}
});
}

private void ensureDir(JKleppmannTreeNode entry) {
if (!(entry.meta() instanceof JKleppmannTreeNodeMetaDirectory))
throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
}

@Override
public Optional<JObjectKey> create(String name, long mode) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());

ensureDir(parent);

String fname = path.getFileName().toString();

var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);
File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false);
remoteTx.putData(f);

try {
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
} catch (Exception e) {
// fobj.getMeta().removeRef(newNodeId);
throw e;
}
return Optional.of(f.key());
});
}

//FIXME: Slow..
@Override
public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
return jObjectTxManager.executeTx(() -> {
return getTreeW().findParent(w -> {
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
return f.fileIno().equals(ino);
return false;
});
});
}

@Override
public void mkdir(String name, long mode) {
jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);

String dname = path.getFileName().toString();

Log.debug("Creating directory " + name);

getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
});
}

@Override
public void unlink(String name) {
jObjectTxManager.executeTx(() -> {
var node = getDirEntryOpt(name).orElse(null);
if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
if (!allowRecursiveDelete && !node.children().isEmpty())
throw new DirectoryNotEmptyException();
}
getTreeW().trash(node.meta(), node.key());
});
}

@Override
public Boolean rename(String from, String to) {
return jObjectTxManager.executeTx(() -> {
var node = getDirEntryW(from);
JKleppmannTreeNodeMeta meta = node.meta();

var toPath = Path.of(to);
var toDentry = getDirEntryW(toPath.getParent().toString());
ensureDir(toDentry);

getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
return true;
});
}

@Override
public Boolean chmod(JObjectKey uuid, long mode) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withMode(mode).withCurrentMTime());
return true;
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
} else {
throw new IllegalArgumentException(uuid + " is not a file");
}
});
}

@Override
public Iterable<String> readDir(String name) {
return jObjectTxManager.executeTx(() -> {
var found = getDirEntryW(name);

if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
throw new StatusRuntimeException(Status.INVALID_ARGUMENT);

return found.children().keySet();
});
}

@Override
public ByteString read(JObjectKey fileUuid, long offset, int length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));

var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
}

try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
if (!it.hasNext())
return ByteString.empty();

// if (it.peekNextKey().key() != offset) {
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
// return Optional.of(ByteString.empty());
// }
long curPos = offset;
ByteString buf = ByteString.empty();

var chunk = it.next();

while (curPos < offset + length) {
var chunkPos = chunk.getKey().key();

long offInChunk = curPos - chunkPos;

long toReadInChunk = (offset + length) - curPos;

var chunkBytes = readChunk(chunk.getValue().ref());

long readableLen = chunkBytes.size() - offInChunk;

var toReadReally = Math.min(readableLen, toReadInChunk);

if (toReadReally < 0) break;

buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));

curPos += toReadReally;

if (readableLen > toReadInChunk)
break;

if (!it.hasNext()) break;

chunk = it.next();
}

return buf;
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
}
});
}
private ByteString readChunk(JObjectKey uuid) {
var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);

if (chunkRead == null) {
Log.error("Chunk requested not found: " + uuid);
throw new StatusRuntimeException(Status.NOT_FOUND);
}

return chunkRead.data();
}

private int getChunkSize(JObjectKey uuid) {
return readChunk(uuid).size();
}

private long alignDown(long num, long n) {
return num & -(1L << n);
}

@Override
public Long write(JObjectKey fileUuid, long offset, ByteString data) {
return jObjectTxManager.executeTx(() -> {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));

var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
if (file == null) {
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
}

Map<Long, JObjectKey> removedChunks = new HashMap<>();

long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
long writeEnd = offset + data.size();
long start = realOffset;
long existingEnd = 0;
ByteString pendingPrefix = ByteString.empty();
ByteString pendingSuffix = ByteString.empty();

try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
while (it.hasNext()) {
var curEntry = it.next();
long curChunkStart = curEntry.getKey().key();
var curChunkId = curEntry.getValue().ref();
long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
existingEnd = curChunkEnd;
if (curChunkEnd <= realOffset) break;

removedChunks.put(curEntry.getKey().key(), curChunkId);

if (curChunkStart < offset) {
if (curChunkStart < start)
start = curChunkStart;

var readChunk = readChunk(curChunkId);
pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
}

if (curChunkEnd > writeEnd) {
var readChunk = readChunk(curChunkId);
pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
}

if (curChunkEnd >= writeEnd) break;
}
}


Map<Long, JObjectKey> newChunks = new HashMap<>();

if (existingEnd < offset) {
if (!pendingPrefix.isEmpty()) {
int diff = Math.toIntExact(offset - existingEnd);
pendingPrefix = pendingPrefix.concat(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(diff)));
} else {
fillZeros(existingEnd, offset, newChunks);
start = offset;
}
}

ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);

int combinedSize = pendingWrites.size();

{
int cur = 0;
while (cur < combinedSize) {
int end;

if (combinedSize - cur < maxChunkSize)
end = combinedSize;
else if (targetChunkAlignment < 0)
end = combinedSize;
else
end = Math.min(cur + targetChunkSize, combinedSize);

var thisChunk = pendingWrites.substring(cur, end);

ChunkData newChunkData = createChunk(thisChunk);
newChunks.put(start, newChunkData.key());

start += thisChunk.size();
cur = end;
}
}

for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}

for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}

remoteTx.putData(file.withCurrentMTime());

return (long) data.size();
});
}

@Override
public Boolean truncate(JObjectKey fileUuid, long length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));

var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to write: " + fileUuid);
return false;
}

if (length == 0) {
jMapHelper.deleteAll(file);
remoteTx.putData(file);
return true;
}

var curSize = size(fileUuid);
if (curSize == length) return true;

NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();

if (curSize < length) {
fillZeros(curSize, length, newChunks);
} else {
// Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(length))) {
last = it.hasNext() ? it.next() : null;
while (it.hasNext()) {
var next = it.next();
removedChunks.put(next.getKey().key(), next.getValue().ref());
}
}
removedChunks.put(last.getKey().key(), last.getValue().ref());
//
// NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
//
// long start = 0;
//
// try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
// first = it.hasNext() ? it.next() : null;
// boolean empty = last == null;
// if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
// first = null;
// last = null;
// start = offset;
// } else if (!empty) {
// assert first != null;
// removedChunks.put(first.getKey().key(), first.getValue().ref());
// while (it.hasNext() && it.peekNextKey() != last.getKey()) {
// var next = it.next();
// removedChunks.put(next.getKey().key(), next.getValue().ref());
// }
// removedChunks.put(last.getKey().key(), last.getValue().ref());
// }
// }
//
// var tail = chunksAll.lowerEntry(length);
// var afterTail = chunksAll.tailMap(tail.getKey(), false);
//
// removedChunks.put(tail.getKey(), tail.getValue());
// removedChunks.putAll(afterTail);

var tailBytes = readChunk(last.getValue().ref());
var newChunk = tailBytes.substring(0, (int) (length - last.getKey().key()));

ChunkData newChunkData = createChunk(newChunk);
newChunks.put(last.getKey().key(), newChunkData.key());
}

// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());

for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}

for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}

remoteTx.putData(file.withCurrentMTime());
return true;
});
}
private void fillZeros(long fillStart, long length, Map<Long, JObjectKey> newChunks) {
long combinedSize = (length - fillStart);

long start = fillStart;

// Hack
HashMap<Long, ChunkData> zeroCache = new HashMap<>();

{
long cur = 0;
while (cur < combinedSize) {
long end;

if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
end = cur + targetChunkSize;
} else {
end = combinedSize;
}
}

if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(Math.toIntExact(end - cur)))));

ChunkData newChunkData = zeroCache.get(end - cur);
newChunks.put(start, newChunkData.key());

start += newChunkData.data().size();
cur = end;
}
}
}

@Override
public String readlink(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
return readlinkBS(uuid).toStringUtf8();
});
}

@Override
public ByteString readlinkBS(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid)));
});
}

@Override
public JObjectKey symlink(String oldpath, String newpath) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(newpath);
var parent = getDirEntryW(path.getParent().toString());

ensureDir(parent);

String fname = path.getFileName().toString();

var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);

ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true);
jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());

remoteTx.putData(f);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
return f.key();
});
}

@Override
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

// FIXME:
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
return true;
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
});
}

@Override
public long size(JObjectKey fileUuid) {
return jObjectTxManager.executeTx(() -> {
long realSize = 0;
var file = remoteTx.getData(File.class, fileUuid)
.orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));

Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) {
last = it.hasNext() ? it.next() : null;
}

if (last != null) {
realSize = last.getKey().key() + getChunkSize(last.getValue().ref());
}

return realSize;
});
}
}
@@ -1,5 +1,10 @@
package com.usatiuk.dhfsfs.service;

/**
 * DirectoryNotEmptyException indicates that a directory cannot be deleted
 * because it still contains files or subdirectories.
 */
public class DirectoryNotEmptyException extends RuntimeException {
@Override
public synchronized Throwable fillInStackTrace() {

@@ -1,4 +1,11 @@
package com.usatiuk.dhfsfs.service;

public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
/**
 * GetattrRes is a record that represents the result of a getattr operation.
 * @param mtime File modification time
 * @param ctime File creation time
 * @param mode File mode
 * @param type File type
 */
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}

@@ -30,7 +30,7 @@ public class TestDataCleaner {
purgeDirectory(Path.of(tempDirectory).toFile());
}

void purgeDirectory(File dir) {
public void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
@@ -139,16 +139,13 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>0.5C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
true
false
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>

@@ -23,14 +23,11 @@ import jnr.ffi.Pointer;
import jnr.ffi.Runtime;
import jnr.ffi.Struct;
import jnr.ffi.types.off_t;
import jnr.ffi.types.size_t;
import org.apache.commons.lang3.SystemUtils;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import ru.serce.jnrfuse.ErrorCodes;
import ru.serce.jnrfuse.FuseFillDir;
import ru.serce.jnrfuse.FuseStubFS;
import ru.serce.jnrfuse.NotImplemented;
import ru.serce.jnrfuse.flags.FuseBufFlags;
import ru.serce.jnrfuse.struct.*;

import java.nio.ByteBuffer;
@@ -246,9 +243,9 @@ public class DhfsFuse extends FuseStubFS {
@Override
public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
var buffer = UninitializedByteBuffer.allocate((int) size);
UnsafeAccessor.get().getUnsafe().copyMemory(
UnsafeAccessor.UNSAFE.copyMemory(
buf.address(),
UnsafeAccessor.get().getNioAccess().getBufferAddress(buffer),
UnsafeAccessor.NIO.getBufferAddress(buffer),
size
);
return write(path, buffer, offset, fi);

@@ -46,9 +46,9 @@ public class JnrPtrByteOutput extends ByteOutput {
if (value instanceof MappedByteBuffer mb) {
mb.load();
}
long addr = UnsafeAccessor.get().getNioAccess().getBufferAddress(value) + value.position();
long addr = UnsafeAccessor.NIO.getBufferAddress(value) + value.position();
var out = _backing.address() + _pos;
UnsafeAccessor.get().getUnsafe().copyMemory(addr, out, rem);
UnsafeAccessor.UNSAFE.copyMemory(addr, out, rem);
} else {
_backing.put(_pos, value.array(), value.arrayOffset() + value.position(), rem);
}
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp;
package com.usatiuk.dhfsfuse;

import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;
@@ -14,8 +14,9 @@ dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.files.target_chunk_size=524288
dhfs.files.max_chunk_size=524288
dhfs.files.target_chunk_alignment=17
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true

@@ -30,7 +30,7 @@ public class TestDataCleaner {
purgeDirectory(Path.of(tempDirectory).toFile());
}

void purgeDirectory(File dir) {
public static void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;

import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -168,35 +168,6 @@ public class DhfsFuseIT {
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}

// TODO: How this fits with the tree?
@Test
@Disabled
void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container2.getContainerId()).exec();

waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);

await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1);

client.unpauseContainerCmd(container2.getContainerId()).exec();

waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());

waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);

await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}

@Test
void deleteTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
@@ -221,6 +192,28 @@ public class DhfsFuseIT {
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}

@Test
void deleteTestKickedOut() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("kicked"), 60, TimeUnit.SECONDS, 1);

Log.info("Deleting");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
Log.info("Deleted");

// FIXME?
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);

await().atMost(45, TimeUnit.SECONDS).until(() ->
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}

@Test
void moveFileTest() throws IOException, InterruptedException, TimeoutException {
Log.info("Creating");
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;

import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;

import io.quarkus.logging.Log;
import org.jetbrains.annotations.NotNull;
@@ -79,6 +79,8 @@ public class DhfsImage implements Future<String> {
"-Ddhfs.objects.sync.timeout=30",
"-Ddhfs.objects.sync.ping.timeout=5",
"-Ddhfs.objects.reconnect_interval=1s",
"-Ddhfs.objects.last-seen.timeout=30",
"-Ddhfs.objects.last-seen.update=10",
"-Ddhfs.sync.cert-check=false",
"-Dquarkus.log.category.\"com.usatiuk\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE",
@@ -1,7 +1,7 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;

import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsapp.TestDataCleaner;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;

import io.quarkus.logging.Log;

@@ -1,7 +1,7 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;

import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsapp.TestDataCleaner;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;
@@ -154,7 +154,7 @@ public class LazyFsIT {
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
@@ -195,7 +195,7 @@ public class LazyFsIT {
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
@@ -237,7 +237,7 @@ public class LazyFsIT {
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
@@ -279,7 +279,7 @@ public class LazyFsIT {
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
@@ -322,7 +322,7 @@ public class LazyFsIT {
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
@@ -366,7 +366,7 @@ public class LazyFsIT {
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
@@ -409,7 +409,7 @@ public class LazyFsIT {
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
@@ -453,7 +453,7 @@ public class LazyFsIT {
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfsfuse.integration;

import com.github.dockerjava.api.model.Device;
import org.junit.jupiter.api.*;
@@ -1,32 +0,0 @@
|
||||
package com.usatiuk.kleppmanntree;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
public class AtomicClock implements Clock<Long>, Serializable {
|
||||
private long _max = 0;
|
||||
|
||||
public AtomicClock(long counter) {
|
||||
_max = counter;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long getTimestamp() {
|
||||
return ++_max;
|
||||
}
|
||||
|
||||
public void setTimestamp(Long timestamp) {
|
||||
_max = timestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long peekTimestamp() {
|
||||
return _max;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long updateTimestamp(Long receivedTimestamp) {
|
||||
var old = _max;
|
||||
_max = Math.max(_max, receivedTimestamp) + 1;
|
||||
return old;
|
||||
}
|
||||
}
|
||||
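The deleted AtomicClock above is a Lamport-style logical clock: getTimestamp ticks the counter for local events, and updateTimestamp merges a remote timestamp by taking the maximum of both counters plus one, which preserves causal ordering across peers. A minimal standalone sketch of the same rule (the class and method names here are illustrative, not part of the repository):

    // Minimal Lamport clock sketch mirroring the removed AtomicClock:
    // local events increment, received timestamps merge via max(a, b) + 1.
    public final class LamportClock {
        private long counter;

        public LamportClock(long start) {
            counter = start;
        }

        // Local event: advance and return the new timestamp.
        public synchronized long tick() {
            return ++counter;
        }

        // Message receipt: jump past both our counter and the received one.
        public synchronized long receive(long received) {
            counter = Math.max(counter, received) + 1;
            return counter;
        }
    }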
@@ -1,11 +1,10 @@
package com.usatiuk.objects.iterators;

import com.usatiuk.utils.AutoCloseableNoThrow;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Iterator;

public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends Iterator<Pair<K, V>>, AutoCloseableNoThrow {
public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends Iterator<Pair<K, V>>, AutoCloseable {
    K peekNextKey();

    void skip();
@@ -21,4 +20,7 @@ public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends
    default CloseableKvIterator<K, V> reversed() {
        return new ReversedKvIterator<K, V>(this);
    }

    @Override
    void close();
}
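Both this interface and Snapshot below drop the custom AutoCloseableNoThrow supertype in favor of plain AutoCloseable, while re-declaring close() without a throws clause; the override narrows the checked exception away, so try-with-resources still needs no handler. A hypothetical caller (the variable names and the processing step are illustrative):

    // Hypothetical usage; close() is redeclared without `throws`,
    // so no checked exception handling is required here.
    try (CloseableKvIterator<JObjectKey, JData> it = curTx.getIterator(startKey)) {
        while (it.hasNext()) {
            var pair = it.next();
            Log.infov("{0} -> {1}", pair.getKey(), pair.getValue());
        }
    }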
@@ -3,19 +3,20 @@ package com.usatiuk.objects.snapshot;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MaybeTombstone;
import com.usatiuk.objects.iterators.Tombstone;
import com.usatiuk.utils.AutoCloseableNoThrow;

import javax.annotation.Nonnull;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;

public interface Snapshot<K extends Comparable<K>, V> extends AutoCloseableNoThrow {
public interface Snapshot<K extends Comparable<K>, V> extends AutoCloseable {
    List<CloseableKvIterator<K, MaybeTombstone<V>>> getIterator(IteratorStart start, K key);

    @Nonnull
    Optional<V> readObject(K name);

    long id();

    @Override
    void close();

}
@@ -31,7 +31,6 @@ public class CachingObjectPersistentStore {
    SerializingObjectPersistentStore delegate;
    @ConfigProperty(name = "dhfs.objects.lru.print-stats")
    boolean printStats;
    private ExecutorService _commitExecutor;
    private ExecutorService _statusExecutor;
    private AtomicLong _cached = new AtomicLong();
    private AtomicLong _cacheTries = new AtomicLong();
@@ -47,7 +46,6 @@ public class CachingObjectPersistentStore {
            _cache.set(_cache.get().withVersion(s.id()));
        }

        _commitExecutor = Executors.newSingleThreadExecutor();
        if (printStats) {
            _statusExecutor = Executors.newSingleThreadExecutor();
            _statusExecutor.submit(() -> {
@@ -68,7 +66,6 @@ public class CachingObjectPersistentStore {
        Log.tracev("Committing: {0} writes, {1} deletes", objs.written().size(), objs.deleted().size());

        var cache = _cache.get();
        var commitFuture = _commitExecutor.submit(() -> delegate.prepareTx(objs, txId).run());
        for (var write : objs.written()) {
            cache = cache.withPut(write.getLeft(), Optional.of(write.getRight()));
        }
@@ -76,11 +73,7 @@ public class CachingObjectPersistentStore {
            cache = cache.withPut(del, Optional.empty());
        }
        cache = cache.withVersion(txId);
        try {
            commitFuture.get();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        delegate.commitTx(objs, txId);
        _cache.set(cache);

        Log.tracev("Committed: {0} writes, {1} deletes", objs.written().size(), objs.deleted().size());

@@ -145,10 +145,9 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
    }

    @Override
    public Runnable prepareTx(TxManifestRaw names, long txId) {
    public void commitTx(TxManifestRaw names, long txId) {
        verifyReady();
        var txn = _env.txnWrite();
        try {
        try (var txn = _env.txnWrite()) {
            for (var written : names.written()) {
                var putBb = _db.reserve(txn, written.getKey().toByteBuffer(), written.getValue().size());
                written.getValue().copyTo(putBb);
@@ -163,17 +162,8 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
            bbData.putLong(txId);
            bbData.flip();
            _db.put(txn, DB_VER_OBJ_NAME.asReadOnlyBuffer(), bbData);
        } catch (Throwable t) {
            txn.close();
            throw t;
            txn.commit();
        }
        return () -> {
            try {
                txn.commit();
            } finally {
                txn.close();
            }
        };
    }

    @Override
@@ -188,12 +178,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
        return _root.toFile().getFreeSpace();
    }

    @Override
    public long getUsableSpace() {
        verifyReady();
        return _root.toFile().getUsableSpace();
    }

    private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>> {
        private static final Cleaner CLEANER = Cleaner.create();
        private final Txn<ByteBuffer> _txn; // Managed by the snapshot

@@ -53,19 +53,18 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
        }
    }

    public Runnable prepareTx(TxManifestRaw names, long txId) {
        return () -> {
            synchronized (this) {
                for (var written : names.written()) {
                    _objects = _objects.plus(written.getKey(), written.getValue());
                }
                for (JObjectKey key : names.deleted()) {
                    _objects = _objects.minus(key);
                }
                assert txId > _lastCommitId;
                _lastCommitId = txId;
    @Override
    public void commitTx(TxManifestRaw names, long txId) {
        synchronized (this) {
            for (var written : names.written()) {
                _objects = _objects.plus(written.getKey(), written.getValue());
            }
        };
            for (JObjectKey key : names.deleted()) {
                _objects = _objects.minus(key);
            }
            assert txId > _lastCommitId;
            _lastCommitId = txId;
        }
    }

    @Override
@@ -77,9 +76,4 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
    public long getFreeSpace() {
        return 0;
    }

    @Override
    public long getUsableSpace() {
        return 0;
    }
}

@@ -13,11 +13,9 @@ import java.util.Optional;
public interface ObjectPersistentStore {
    Snapshot<JObjectKey, ByteBuffer> getSnapshot();

    Runnable prepareTx(TxManifestRaw names, long txId);
    void commitTx(TxManifestRaw names, long txId);

    long getTotalSpace();

    long getFreeSpace();

    long getUsableSpace();
}
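This interface hunk summarizes the refactor threaded through the stores above: the old two-phase contract returned a Runnable that performed the final commit, while the new contract stages and commits in a single call. Schematically (the store and manifest variables are illustrative):

    // Old contract (removed): prepare first, commit later via the Runnable.
    Runnable pending = store.prepareTx(manifest, txId);
    pending.run(); // make the staged writes durable

    // New contract: one call does both.
    store.commitTx(manifest, txId);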
@@ -62,7 +62,7 @@ public class SerializingObjectPersistentStore {
                , objs.deleted());
    }

    Runnable prepareTx(TxManifestObj<? extends JDataVersionedWrapper> objects, long txId) {
        return delegateStore.prepareTx(prepareManifest(objects), txId);
    void commitTx(TxManifestObj<? extends JDataVersionedWrapper> objects, long txId) {
        delegateStore.commitTx(prepareManifest(objects), txId);
    }
}

@@ -3,9 +3,11 @@ package com.usatiuk.objects.stores;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JDataVersionedWrapperImpl;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MaybeTombstone;
import com.usatiuk.objects.iterators.NavigableMapKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.transaction.TxCommitException;
import com.usatiuk.objects.transaction.TxRecord;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
@@ -25,7 +27,6 @@ import javax.annotation.Nonnull;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Condition;
@@ -187,7 +188,7 @@ public class WritebackObjectPersistentStore {
        Log.info("Writeback thread exiting");
    }

    public long commitBundle(Collection<TxRecord.TxObjectRecord<?>> writes) {
    private long commitBundle(Collection<TxRecord.TxObjectRecord<?>> writes) {
        verifyReady();
        _pendingBundleLock.lock();
        try {
@@ -218,17 +219,18 @@ public class WritebackObjectPersistentStore {
            var curPwMap = curPw.pendingWrites();

            for (var action : writes) {
                var key = action.key();
                switch (action) {
                    case TxRecord.TxObjectRecordWrite<?> write -> {
                        // Log.tracev("Flushing object {0}", write.key());
                        var wrapper = new JDataVersionedWrapperImpl(write.data(), oursId);
                        curPwMap = curPwMap.plus(write.key(), new PendingWrite(wrapper, oursId));
                        curPwMap = curPwMap.plus(key, new PendingWrite(wrapper, oursId));
                        curBundle.commit(wrapper);
                    }
                    case TxRecord.TxObjectRecordDeleted deleted -> {
                        // Log.tracev("Deleting object {0}", deleted.key());
                        curPwMap = curPwMap.plus(deleted.key(), new PendingDelete(deleted.key(), oursId));
                        curBundle.delete(deleted.key());
                        curPwMap = curPwMap.plus(key, new PendingDelete(key, oursId));
                        curBundle.delete(key);
                    }
                }
            }

@@ -25,8 +25,8 @@ public class CurrentTransaction implements Transaction {
    }

    @Override
    public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy) {
        return transactionManager.current().get(type, key, strategy);
    public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key) {
        return transactionManager.current().get(type, key);
    }

    @Override

@@ -1,23 +0,0 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JObjectKey;
import com.usatiuk.utils.AutoCloseableNoThrow;
import com.usatiuk.utils.DataLocker;
import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;
import jakarta.inject.Singleton;

@Singleton
public class LockManager {
    private final DataLocker _objLocker = new DataLocker();

    @Nonnull
    public AutoCloseableNoThrow lockObject(JObjectKey key) {
        return _objLocker.lock(key);
    }

    @Nullable
    public AutoCloseableNoThrow tryLockObject(JObjectKey key) {
        return _objLocker.tryLock(key);
    }
}
@@ -1,6 +0,0 @@
package com.usatiuk.objects.transaction;

public enum LockingStrategy {
    OPTIMISTIC, // Optimistic write, no blocking other possible writers/readers
    WRITE, // Write lock, blocks all other writers
}
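With LockManager and LockingStrategy both deleted, per-object write locks are no longer part of the public transaction API: every read is optimistic, conflicts surface when the transaction commits, and callers retry. The tests later in this diff show the pattern that remains (a retry count of 0 there means fail immediately); a sketch of a retrying caller (the entity type, key, and retry count are illustrative):

    // Optimistic retry: runTries re-runs the closure when an
    // optimistic commit fails; the retry count here is arbitrary.
    txm.runTries(() -> {
        var parent = curTx.get(Parent.class, key).orElse(null);
        curTx.put(parent.withName("John"));
    }, 10);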
@@ -11,17 +11,13 @@ import java.util.Optional;
public interface Transaction extends TransactionHandle {
    void onCommit(Runnable runnable);

    <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy);
    <T extends JData> Optional<T> get(Class<T> type, JObjectKey key);

    <T extends JData> void put(JData obj);
    <T extends JData> void putNew(JData obj);

    void delete(JObjectKey key);

    default <T extends JData> Optional<T> get(Class<T> type, JObjectKey key) {
        return get(type, key, LockingStrategy.OPTIMISTIC);
    }

    CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key);

    default CloseableKvIterator<JObjectKey, JData> getIterator(JObjectKey key) {
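For callers the narrowed interface is a mechanical change: the three-argument get and the defaulting overload collapse into a single two-argument entry point. Before and after (key construction is illustrative):

    // Before: a locking strategy chosen per read.
    var locked = curTx.get(Parent.class, key, LockingStrategy.WRITE).orElse(null);

    // After: the single, optimistic read path.
    var read = curTx.get(Parent.class, key).orElse(null);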
@@ -1,5 +0,0 @@
package com.usatiuk.objects.transaction;

public interface TransactionFactory {
    TransactionPrivate createTransaction();
}
@@ -1,268 +0,0 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.stores.WritebackObjectPersistentStore;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.*;

@Singleton
public class TransactionFactoryImpl implements TransactionFactory {
    @Inject
    WritebackObjectPersistentStore writebackObjectPersistentStore;
    @Inject
    LockManager lockManager;
    @ConfigProperty(name = "dhfs.objects.transaction.never-lock")
    boolean neverLock;

    @Override
    public TransactionPrivate createTransaction() {
        return new TransactionImpl();
    }

    private interface ReadTrackingInternalCrap {
        boolean fromSource();

        JData obj();
    }

    // FIXME:
    private record ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped) implements ReadTrackingInternalCrap {
        @Override
        public boolean fromSource() {
            return true;
        }

        @Override
        public JData obj() {
            return wrapped.data();
        }
    }

    private record ReadTrackingInternalCrapTx(JData obj) implements ReadTrackingInternalCrap {
        @Override
        public boolean fromSource() {
            return false;
        }
    }

    private class TransactionImpl implements TransactionPrivate {
        private final Map<JObjectKey, Optional<JDataVersionedWrapper>> _readSet = new HashMap<>();
        private final NavigableMap<JObjectKey, TxRecord.TxObjectRecord<?>> _writes = new TreeMap<>();
        private final List<Runnable> _onCommit = new LinkedList<>();
        private final List<Runnable> _onFlush = new LinkedList<>();
        private final HashSet<JObjectKey> _knownNew = new HashSet<>();
        private final Snapshot<JObjectKey, JDataVersionedWrapper> _snapshot;
        private boolean _closed = false;
        private Map<JObjectKey, TxRecord.TxObjectRecord<?>> _newWrites = new HashMap<>();

        private TransactionImpl() {
            _snapshot = writebackObjectPersistentStore.getSnapshot();
        }

        @Override
        public void onCommit(Runnable runnable) {
            _onCommit.add(runnable);
        }

        @Override
        public void onFlush(Runnable runnable) {
            _onFlush.add(runnable);
        }

        @Override
        public Collection<Runnable> getOnCommit() {
            return Collections.unmodifiableCollection(_onCommit);
        }

        @Override
        public Snapshot<JObjectKey, JDataVersionedWrapper> snapshot() {
            return _snapshot;
        }

        @Override
        public Collection<Runnable> getOnFlush() {
            return Collections.unmodifiableCollection(_onFlush);
        }

        @Override
        public <T extends JData> Optional<T> getFromSource(Class<T> type, JObjectKey key) {
            if (_knownNew.contains(key)) {
                return Optional.empty();
            }
            var got = _readSet.computeIfAbsent(key, k -> {
                var read = _snapshot.readObject(k);
                return read;
            });
            if (got.isEmpty())
                return Optional.empty();
            var gotData = got.get();
            return Optional.of(type.cast(gotData.data()));
        }

        @Override
        public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy) {
            switch (_writes.get(key)) {
                case TxRecord.TxObjectRecordWrite<?> write -> {
                    return Optional.of(type.cast(write.data()));
                }
                case TxRecord.TxObjectRecordDeleted deleted -> {
                    return Optional.empty();
                }
                case null, default -> {
                }
            }

            return getFromSource(type, key);
        }

        @Override
        public void delete(JObjectKey key) {
            var got = _writes.get(key);
            if (got != null) {
                if (got instanceof TxRecord.TxObjectRecordDeleted) {
                    return;
                }
            }

            _writes.put(key, new TxRecord.TxObjectRecordDeleted(key));
            _newWrites.put(key, new TxRecord.TxObjectRecordDeleted(key));
        }

        @Override
        public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
            Log.tracev("Getting tx iterator with start={0}, key={1}", start, key);
            return new ReadTrackingIterator(new TombstoneSkippingIterator<JObjectKey, ReadTrackingInternalCrap>(start, key,
                    ListUtils.prependAndMap(
                            new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, start, key),
                                    t -> switch (t) {
                                        case TxRecord.TxObjectRecordWrite<?> write ->
                                                new DataWrapper<ReadTrackingInternalCrap>(new ReadTrackingInternalCrapTx(write.data()));
                                        case TxRecord.TxObjectRecordDeleted deleted ->
                                                new TombstoneImpl<ReadTrackingInternalCrap>();
                                        case null, default -> null;
                                    }),
                            _snapshot.getIterator(start, key),
                            itin -> new MappingKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>, MaybeTombstone<ReadTrackingInternalCrap>>(itin,
                                    d -> switch (d) {
                                        case Data<JDataVersionedWrapper> w ->
                                                new DataWrapper<>(new ReadTrackingInternalCrapSource(w.value()));
                                        case Tombstone<JDataVersionedWrapper> t -> new TombstoneImpl<>();
                                        case null, default -> null;
                                    }))));
        }

        @Override
        public void put(JData obj) {
            var read = _readSet.get(obj.key());
            if (read != null && (read.map(JDataVersionedWrapper::data).orElse(null) == obj)) {
                return;
            }

            _writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
            _newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
        }

        @Override
        public void putNew(JData obj) {
            _knownNew.add(obj.key());

            _writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
            _newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
        }

        @Override
        public Collection<TxRecord.TxObjectRecord<?>> drainNewWrites() {
            var ret = _newWrites;
            _newWrites = new HashMap<>();
            return ret.values();
        }

        @Override
        public Map<JObjectKey, Optional<JDataVersionedWrapper>> reads() {
            return _readSet;
        }

        @Override
        public Set<JObjectKey> knownNew() {
            return _knownNew;
        }

        @Override
        public void close() {
            if (_closed) return;
            _closed = true;
            _snapshot.close();
        }

        private class ReadTrackingIterator implements CloseableKvIterator<JObjectKey, JData> {
            private final CloseableKvIterator<JObjectKey, ReadTrackingInternalCrap> _backing;

            public ReadTrackingIterator(CloseableKvIterator<JObjectKey, ReadTrackingInternalCrap> backing) {
                _backing = backing;
            }

            @Override
            public JObjectKey peekNextKey() {
                return _backing.peekNextKey();
            }

            @Override
            public void skip() {
                _backing.skip();
            }

            @Override
            public JObjectKey peekPrevKey() {
                return _backing.peekPrevKey();
            }

            @Override
            public Pair<JObjectKey, JData> prev() {
                var got = _backing.prev();
                if (got.getValue() instanceof ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped)) {
                    _readSet.putIfAbsent(got.getKey(), Optional.of(wrapped));
                }
                return Pair.of(got.getKey(), got.getValue().obj());
            }

            @Override
            public boolean hasPrev() {
                return _backing.hasPrev();
            }

            @Override
            public void skipPrev() {
                _backing.skipPrev();
            }

            @Override
            public void close() {
                _backing.close();
            }

            @Override
            public boolean hasNext() {
                return _backing.hasNext();
            }

            @Override
            public Pair<JObjectKey, JData> next() {
                var got = _backing.next();
                if (got.getValue() instanceof ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped)) {
                    _readSet.putIfAbsent(got.getKey(), Optional.of(wrapped));
                }
                return Pair.of(got.getKey(), got.getValue().obj());
            }
        }
    }
}
@@ -1,7 +0,0 @@
package com.usatiuk.objects.transaction;

import java.util.Collection;

public interface TransactionHandlePrivate extends TransactionHandle {
    Collection<Runnable> getOnFlush();
}
@@ -0,0 +1,240 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;

class TransactionImpl implements Transaction, AutoCloseable {
    private final Map<JObjectKey, Optional<JDataVersionedWrapper>> _readSet = new HashMap<>();
    private final NavigableMap<JObjectKey, TxRecord.TxObjectRecord<?>> _writes = new TreeMap<>();
    private final List<Runnable> _onCommit = new LinkedList<>();
    private final List<Runnable> _onFlush = new LinkedList<>();
    private final HashSet<JObjectKey> _knownNew = new HashSet<>();
    private final Snapshot<JObjectKey, JDataVersionedWrapper> _snapshot;
    private boolean _closed = false;

    private boolean _writeTrack = false;
    private Map<JObjectKey, TxRecord.TxObjectRecord<?>> _newWrites = new HashMap<>();

    private interface ReadTrackingInternalCrap {
        boolean fromSource();

        JData obj();
    }

    // FIXME:
    private record ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped) implements ReadTrackingInternalCrap {
        @Override
        public boolean fromSource() {
            return true;
        }

        @Override
        public JData obj() {
            return wrapped.data();
        }
    }

    private record ReadTrackingInternalCrapTx(JData obj) implements ReadTrackingInternalCrap {
        @Override
        public boolean fromSource() {
            return false;
        }
    }

    TransactionImpl(Snapshot<JObjectKey, JDataVersionedWrapper> snapshot) {
        _snapshot = snapshot;
    }

    @Override
    public void onCommit(Runnable runnable) {
        _onCommit.add(runnable);
    }

    @Override
    public void onFlush(Runnable runnable) {
        _onFlush.add(runnable);
    }

    Collection<Runnable> getOnCommit() {
        return Collections.unmodifiableCollection(_onCommit);
    }

    Snapshot<JObjectKey, JDataVersionedWrapper> snapshot() {
        return _snapshot;
    }

    Collection<Runnable> getOnFlush() {
        return Collections.unmodifiableCollection(_onFlush);
    }

    <T extends JData> Optional<T> getFromSource(Class<T> type, JObjectKey key) {
        if (_knownNew.contains(key)) {
            return Optional.empty();
        }

        return _readSet.computeIfAbsent(key, _snapshot::readObject)
                .map(JDataVersionedWrapper::data)
                .map(type::cast);
    }

    @Override
    public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key) {
        return switch (_writes.get(key)) {
            case TxRecord.TxObjectRecordWrite<?> write -> Optional.of(type.cast(write.data()));
            case TxRecord.TxObjectRecordDeleted deleted -> Optional.empty();
            case null -> getFromSource(type, key);
        };
    }

    @Override
    public void delete(JObjectKey key) {
        var record = new TxRecord.TxObjectRecordDeleted(key);
        if (_writes.put(key, record) instanceof TxRecord.TxObjectRecordDeleted) {
            return;
        }
        if (_writeTrack)
            _newWrites.put(key, record);
    }

    @Override
    public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
        Log.tracev("Getting tx iterator with start={0}, key={1}", start, key);
        return new ReadTrackingIterator(new TombstoneSkippingIterator<JObjectKey, ReadTrackingInternalCrap>(start, key,
                ListUtils.prependAndMap(
                        new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, start, key),
                                t -> switch (t) {
                                    case TxRecord.TxObjectRecordWrite<?> write ->
                                            new DataWrapper<ReadTrackingInternalCrap>(new ReadTrackingInternalCrapTx(write.data()));
                                    case TxRecord.TxObjectRecordDeleted deleted ->
                                            new TombstoneImpl<ReadTrackingInternalCrap>();
                                    case null, default -> null;
                                }),
                        _snapshot.getIterator(start, key),
                        itin -> new MappingKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>, MaybeTombstone<ReadTrackingInternalCrap>>(itin,
                                d -> switch (d) {
                                    case Data<JDataVersionedWrapper> w ->
                                            new DataWrapper<>(new ReadTrackingInternalCrapSource(w.value()));
                                    case Tombstone<JDataVersionedWrapper> t -> new TombstoneImpl<>();
                                    case null, default -> null;
                                }))));
    }

    @Override
    public void put(JData obj) {
        var key = obj.key();
        var read = _readSet.get(key);
        if (read != null && (read.map(JDataVersionedWrapper::data).orElse(null) == obj)) {
            return;
        }

        var record = new TxRecord.TxObjectRecordWrite<>(obj);
        _writes.put(key, record);
        if (_writeTrack)
            _newWrites.put(key, record);
    }

    @Override
    public void putNew(JData obj) {
        var key = obj.key();
        _knownNew.add(key);

        var record = new TxRecord.TxObjectRecordWrite<>(obj);
        _writes.put(key, record);
        if (_writeTrack)
            _newWrites.put(key, record);
    }

    Collection<TxRecord.TxObjectRecord<?>> drainNewWrites() {
        if (!_writeTrack) {
            _writeTrack = true;
            return Collections.unmodifiableCollection(_writes.values());
        }
        var ret = _newWrites;
        _newWrites = new HashMap<>();
        return ret.values();
    }

    Map<JObjectKey, Optional<JDataVersionedWrapper>> reads() {
        return _readSet;
    }

    Set<JObjectKey> knownNew() {
        return _knownNew;
    }

    @Override
    public void close() {
        if (_closed) return;
        _closed = true;
        _snapshot.close();
    }

    private class ReadTrackingIterator implements CloseableKvIterator<JObjectKey, JData> {
        private final CloseableKvIterator<JObjectKey, ReadTrackingInternalCrap> _backing;

        public ReadTrackingIterator(CloseableKvIterator<JObjectKey, ReadTrackingInternalCrap> backing) {
            _backing = backing;
        }

        @Override
        public JObjectKey peekNextKey() {
            return _backing.peekNextKey();
        }

        @Override
        public void skip() {
            _backing.skip();
        }

        @Override
        public JObjectKey peekPrevKey() {
            return _backing.peekPrevKey();
        }

        @Override
        public Pair<JObjectKey, JData> prev() {
            var got = _backing.prev();
            if (got.getValue() instanceof ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped)) {
                _readSet.putIfAbsent(got.getKey(), Optional.of(wrapped));
            }
            return Pair.of(got.getKey(), got.getValue().obj());
        }

        @Override
        public boolean hasPrev() {
            return _backing.hasPrev();
        }

        @Override
        public void skipPrev() {
            _backing.skipPrev();
        }

        @Override
        public void close() {
            _backing.close();
        }

        @Override
        public boolean hasNext() {
            return _backing.hasNext();
        }

        @Override
        public Pair<JObjectKey, JData> next() {
            var got = _backing.next();
            if (got.getValue() instanceof ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped)) {
                _readSet.putIfAbsent(got.getKey(), Optional.of(wrapped));
            }
            return Pair.of(got.getKey(), got.getValue().obj());
        }
    }
}
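The new _writeTrack flag gives drainNewWrites incremental semantics: the first call returns everything written so far and switches tracking on, and each later call returns only the writes made since the previous drain. That is what lets the commit path further down run pre-commit hooks to a fixed point without re-feeding them old records. A toy illustration of the contract (objects a, b, c are placeholders):

    tx.put(a);
    tx.put(b);
    var first = tx.drainNewWrites();  // [a, b] — tracking is now enabled
    tx.put(c);
    var second = tx.drainNewWrites(); // [c] — only the delta
    var third = tx.drainNewWrites();  // [] — nothing new since last drain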
@@ -1,6 +1,5 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.utils.VoidFn;
import io.quarkus.logging.Log;

import java.util.function.Supplier;
@@ -12,8 +11,8 @@ public interface TransactionManager {

    void rollback();

    default <T> T runTries(Supplier<T> supplier, int tries) {
        if (current() != null) {
    default <T> T runTries(Supplier<T> supplier, int tries, boolean nest) {
        if (!nest && current() != null) {
            return supplier.get();
        }

@@ -41,9 +40,9 @@ public interface TransactionManager {
        }
    }

    default TransactionHandle runTries(VoidFn fn, int tries) {
        if (current() != null) {
            fn.apply();
    default TransactionHandle runTries(Runnable fn, int tries, boolean nest) {
        if (!nest && current() != null) {
            fn.run();
            return new TransactionHandle() {
                @Override
                public void onFlush(Runnable runnable) {
@@ -56,7 +55,7 @@ public interface TransactionManager {
        begin();
        boolean commit = false;
        try {
            fn.apply();
            fn.run();
            commit = true;
            var ret = commit();
            return ret;
@@ -74,23 +73,38 @@ public interface TransactionManager {
            throw e;
        }
    }

    default TransactionHandle run(VoidFn fn) {
        return runTries(fn, 10);
    default <T> T runTries(Supplier<T> supplier, int tries) {
        return runTries(supplier, tries, false);
    }

    default TransactionHandle runTries(Runnable fn, int tries) {
        return runTries(fn, tries, false);
    }

    default TransactionHandle run(Runnable fn, boolean nest) {
        return runTries(fn, 10, nest);
    }

    default <T> T run(Supplier<T> supplier, boolean nest) {
        return runTries(supplier, 10, nest);
    }

    default TransactionHandle run(Runnable fn) {
        return run(fn, false);
    }

    default <T> T run(Supplier<T> supplier) {
        return runTries(supplier, 10);
        return run(supplier, false);
    }

    default void executeTx(VoidFn fn) {
        run(fn);
    default void executeTx(Runnable fn) {
        run(fn, false);
    }

    default <T> T executeTx(Supplier<T> supplier) {
        return run(supplier);
        return run(supplier, false);
    }

    Transaction current();
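The new nest flag decides what happens when a transaction is already open on the calling thread: with nest == false the closure simply joins the current transaction (the old behavior), while nest == true pushes a fresh transaction that commits independently. AutosyncProcessor later in this diff uses exactly this shape; a condensed sketch:

    txm.run(() -> {
        // ... outer transaction work ...
        txm.run(() -> {
            // nest = true: runs and commits in its own inner transaction,
            // even though the outer one is still open.
        }, true);
    });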
@@ -1,47 +1,48 @@
package com.usatiuk.objects.transaction;

import io.quarkus.logging.Log;
import jakarta.annotation.Nullable;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.Stack;

@Singleton
public class TransactionManagerImpl implements TransactionManager {
    private static final ThreadLocal<TransactionPrivate> _currentTransaction = new ThreadLocal<>();
    private static final ThreadLocal<Stack<TransactionImpl>> _currentTransaction = ThreadLocal.withInitial(Stack::new);
    @Inject
    JObjectManager jObjectManager;
    TransactionService transactionService;

    @Override
    public void begin() {
        if (_currentTransaction.get() != null) {
            throw new IllegalStateException("Transaction already started");
        }

        Log.trace("Starting transaction");
        var tx = jObjectManager.createTransaction();
        _currentTransaction.set(tx);
        var tx = transactionService.createTransaction();
        _currentTransaction.get().push(tx);
    }

    @Override
    public TransactionHandle commit() {
        if (_currentTransaction.get() == null) {
        var stack = _currentTransaction.get();
        if (stack.empty()) {
            throw new IllegalStateException("No transaction started");
        }
        var peeked = stack.peek();

        Log.trace("Committing transaction");

        Pair<Collection<Runnable>, TransactionHandle> ret;
        try {
            ret = jObjectManager.commit(_currentTransaction.get());
            ret = transactionService.commit(peeked);
        } catch (Throwable e) {
            Log.trace("Transaction commit failed", e);
            throw e;
        } finally {
            _currentTransaction.get().close();
            _currentTransaction.remove();
            peeked.close();
            stack.pop();
            if (stack.empty())
                _currentTransaction.remove();
        }

        for (var r : ret.getLeft()) {
@@ -56,24 +57,33 @@ public class TransactionManagerImpl implements TransactionManager {

    @Override
    public void rollback() {
        if (_currentTransaction.get() == null) {
        var stack = _currentTransaction.get();
        if (stack.empty()) {
            throw new IllegalStateException("No transaction started");
        }
        var peeked = stack.peek();

        try {
            jObjectManager.rollback(_currentTransaction.get());
            transactionService.rollback(peeked);
        } catch (Throwable e) {
            Log.error("Transaction rollback failed", e);
            throw e;
        } finally {
            _currentTransaction.get().close();
            _currentTransaction.remove();
            peeked.close();
            stack.pop();
            if (stack.empty())
                _currentTransaction.remove();
        }
    }

    @Override
    @Nullable
    public Transaction current() {
        return _currentTransaction.get();
        var stack = _currentTransaction.get();
        if (stack.empty()) {
            return null;
        }
        return stack.peek();
    }
}
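Swapping the single ThreadLocal<TransactionPrivate> for a ThreadLocal<Stack<TransactionImpl>> is what makes that nesting work: begin pushes, commit and rollback pop and close the top entry, and current() is simply the top of the stack. A simplified model of one level of the lifecycle (the helper names here are illustrative, error handling omitted):

    // Simplified: one push/pop cycle of the stack-based manager.
    Stack<TransactionImpl> stack = threadLocalStack(); // hypothetical accessor
    stack.push(createTransaction());                   // begin()
    try {
        doWork(stack.peek());                          // current() sees the innermost tx
    } finally {
        stack.pop().close();                           // commit()/rollback() unwind one level
    }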
@@ -1,27 +0,0 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.utils.AutoCloseableNoThrow;

import java.util.Collection;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

// The transaction interface actually used by user code to retrieve objects
public interface TransactionPrivate extends Transaction, TransactionHandlePrivate, AutoCloseableNoThrow {
    Collection<TxRecord.TxObjectRecord<?>> drainNewWrites();

    Map<JObjectKey, Optional<JDataVersionedWrapper>> reads();

    Set<JObjectKey> knownNew();

    <T extends JData> Optional<T> getFromSource(Class<T> type, JObjectKey key);

    Collection<Runnable> getOnCommit();

    Snapshot<JObjectKey, JDataVersionedWrapper> snapshot();
}
@@ -6,6 +6,7 @@ import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.stores.WritebackObjectPersistentStore;
import com.usatiuk.utils.AutoCloseableNoThrow;
import com.usatiuk.utils.DataLocker;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
@@ -19,24 +20,21 @@ import org.apache.commons.lang3.tuple.Pair;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;

@ApplicationScoped
public class JObjectManager {
public class TransactionService {
    private static final List<PreCommitTxHook> _preCommitTxHooks;
    @Inject
    WritebackObjectPersistentStore writebackObjectPersistentStore;
    @Inject
    TransactionFactory transactionFactory;
    @Inject
    LockManager lockManager;

    private boolean _ready = false;
    private final DataLocker _objLocker = new DataLocker();

    static {
        _preCommitTxHooks = List.copyOf(CDI.current().select(PreCommitTxHook.class).stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList());
    }

    JObjectManager(Instance<PreCommitTxHook> preCommitTxHooks) {
    TransactionService(Instance<PreCommitTxHook> preCommitTxHooks) {
        Log.debugv("Pre-commit hooks: {0}", String.join("->", _preCommitTxHooks.stream().map(Objects::toString).toList()));
    }

@@ -48,14 +46,14 @@ public class JObjectManager {
        _ready = true;
    }

    public TransactionPrivate createTransaction() {
    public TransactionImpl createTransaction() {
        verifyReady();
        var tx = transactionFactory.createTransaction();
        var tx = new TransactionImpl(writebackObjectPersistentStore.getSnapshot());
        Log.tracev("Created transaction with snapshotId={0}", tx.snapshot().id());
        return tx;
    }

    public Pair<Collection<Runnable>, TransactionHandle> commit(TransactionPrivate tx) {
    public Pair<Collection<Runnable>, TransactionHandle> commit(TransactionImpl tx) {
        verifyReady();
        var writes = new HashMap<JObjectKey, TxRecord.TxObjectRecord<?>>();
        Snapshot<JObjectKey, JDataVersionedWrapper> commitSnapshot = null;
@@ -75,11 +73,12 @@ public class JObjectManager {
        }

        for (var n : tx.drainNewWrites()) {
            var key = n.key();
            for (var hookPut : hookIterationData) {
                hookPut.pendingWrites().put(n.key(), n);
                hookPut.pendingWrites().put(key, n);
                pendingCount++;
            }
            writes.put(n.key(), n);
            writes.put(key, n);
        }

@@ -108,19 +107,20 @@ public class JObjectManager {
                // Log.trace("Commit iteration with " + curIteration.size() + " records for hook " + hook.getClass());

                for (var entry : curIteration.entrySet()) {
                    var key = entry.getKey();
                    // Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.getKey());
                    var oldObj = getPrev.apply(entry.getKey());
                    lastCurHookSeen.put(entry.getKey(), entry.getValue());
                    var oldObj = getPrev.apply(key);
                    lastCurHookSeen.put(key, entry.getValue());
                    switch (entry.getValue()) {
                        case TxRecord.TxObjectRecordWrite<?> write -> {
                            if (oldObj == null) {
                                hook.onCreate(write.key(), write.data());
                                hook.onCreate(key, write.data());
                            } else {
                                hook.onChange(write.key(), oldObj, write.data());
                                hook.onChange(key, oldObj, write.data());
                            }
                        }
                        case TxRecord.TxObjectRecordDeleted deleted -> {
                            hook.onDelete(deleted.key(), oldObj);
                            hook.onDelete(key, oldObj);
                        }
                        default -> throw new TxCommitException("Unexpected value: " + entry);
                    }
@@ -130,16 +130,17 @@ public class JObjectManager {
                curIteration.clear();

                for (var n : tx.drainNewWrites()) {
                    var key = n.key();
                    for (var hookPut : hookIterationData) {
                        if (hookPut == hookId) {
                            lastCurHookSeen.put(n.key(), n);
                            lastCurHookSeen.put(key, n);
                            continue;
                        }
                        var before = hookPut.pendingWrites().put(n.key(), n);
                        var before = hookPut.pendingWrites().put(key, n);
                        if (before == null)
                            pendingCount++;
                    }
                    writes.put(n.key(), n);
                    writes.put(key, n);
                }
            }
        }
@@ -160,7 +161,7 @@ public class JObjectManager {
        for (var key : toLock) {
            if (tx.knownNew().contains(key))
                continue;
            var lock = lockManager.lockObject(key);
            var lock = _objLocker.lock(key);
            toUnlock.add(lock);
        }

@@ -256,7 +257,7 @@ public class JObjectManager {
        }
    }

    public void rollback(TransactionPrivate tx) {
    public void rollback(TransactionImpl tx) {
        verifyReady();
        tx.close();
    }
@@ -1,10 +1,11 @@
dhfs.objects.persistence=lmdb
dhfs.objects.writeback.limit=134217728
dhfs.objects.lru.limit=134217728
dhfs.objects.writeback.limit=16777216
dhfs.objects.lru.limit=67108864
dhfs.objects.lru.print-stats=false
dhfs.objects.lock_timeout_secs=15
dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs
dhfs.objects.persistence.snapshot-extra-checks=false
dhfs.objects.transaction.never-lock=true
dhfs.objects.last-seen.update=60
dhfs.objects.last-seen.timeout=43200
quarkus.log.category."com.usatiuk.objects.iterators".level=INFO
quarkus.log.category."com.usatiuk.objects.iterators".min-level=INFO

@@ -2,14 +2,11 @@ package com.usatiuk.objects;

import com.usatiuk.objects.data.Parent;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import io.quarkus.logging.Log;
import jakarta.inject.Inject;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

import java.util.List;
import java.util.Map;
@@ -151,12 +148,12 @@ public abstract class ObjectsTestImpl {
        });

        txm.run(() -> {
            var parent = curTx.get(Parent.class, JObjectKey.of(testInfo.getDisplayName() + "Parent3"), LockingStrategy.OPTIMISTIC).orElse(null);
            var parent = curTx.get(Parent.class, JObjectKey.of(testInfo.getDisplayName() + "Parent3")).orElse(null);
            Assertions.assertEquals("John", parent.name());
            curTx.put(parent.withName("John2"));
        });
        txm.run(() -> {
            var parent = curTx.get(Parent.class, JObjectKey.of(testInfo.getDisplayName() + "Parent3"), LockingStrategy.WRITE).orElse(null);
            var parent = curTx.get(Parent.class, JObjectKey.of(testInfo.getDisplayName() + "Parent3")).orElse(null);
            Assertions.assertEquals("John2", parent.name());
            curTx.put(parent.withName("John3"));
        });
@@ -236,10 +233,9 @@ public abstract class ObjectsTestImpl {
        }
    }

    @ParameterizedTest
    @EnumSource(LockingStrategy.class)
    void editConflict(LockingStrategy strategy, TestInfo testInfo) {
        String key = testInfo.getDisplayName() + "Parent4" + strategy.name();
    @Test
    void editConflict(TestInfo testInfo) {
        String key = testInfo.getDisplayName() + "Parent4";
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of(key), "John3");
            curTx.put(newParent);
@@ -260,7 +256,7 @@ public abstract class ObjectsTestImpl {
            } catch (Throwable e) {
                throw new RuntimeException(e);
            }
            var parent = curTx.get(Parent.class, JObjectKey.of(key), strategy).orElse(null);
            var parent = curTx.get(Parent.class, JObjectKey.of(key)).orElse(null);
            curTx.put(parent.withName("John"));
            Log.warn("Thread 1 commit");
        }, 0);
@@ -276,7 +272,7 @@ public abstract class ObjectsTestImpl {
            Log.warn("Thread 2");
            barrier.await(); // Ensure thread 2 tx id is larger than thread 1
            txm.runTries(() -> {
                var parent = curTx.get(Parent.class, JObjectKey.of(key), strategy).orElse(null);
                var parent = curTx.get(Parent.class, JObjectKey.of(key)).orElse(null);
                curTx.put(parent.withName("John2"));
                Log.warn("Thread 2 commit");
            }, 0);
@@ -317,10 +313,9 @@ public abstract class ObjectsTestImpl {
        }
    }

    @ParameterizedTest
    @EnumSource(LockingStrategy.class)
    void editConflict2(LockingStrategy strategy, TestInfo testInfo) {
        String key = testInfo.getDisplayName() + "EditConflict2" + strategy.name();
    @Test
    void editConflict2(TestInfo testInfo) {
        String key = testInfo.getDisplayName() + "EditConflict2";
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of(key), "John3");
            curTx.put(newParent);
@@ -341,7 +336,7 @@ public abstract class ObjectsTestImpl {
            } catch (Throwable e) {
                throw new RuntimeException(e);
            }
            var parent = curTx.get(Parent.class, JObjectKey.of(key), strategy).orElse(null);
            var parent = curTx.get(Parent.class, JObjectKey.of(key)).orElse(null);
            curTx.put(parent.withName("John"));
            Log.warn("Thread 1 commit");
        }, 0);
@@ -362,7 +357,7 @@ public abstract class ObjectsTestImpl {
            } catch (Throwable e) {
                throw new RuntimeException(e);
            }
            var parent = curTx.get(Parent.class, JObjectKey.of(key), strategy).orElse(null);
            var parent = curTx.get(Parent.class, JObjectKey.of(key)).orElse(null);
            curTx.put(parent.withName("John2"));
            Log.warn("Thread 2 commit");
        }, 0);
@@ -922,10 +917,8 @@ public abstract class ObjectsTestImpl {
                () -> createGetObject(testInfo),
                () -> createDeleteObject(testInfo),
                () -> createCreateObject(testInfo),
                () -> editConflict(LockingStrategy.WRITE, testInfo),
                () -> editConflict(LockingStrategy.OPTIMISTIC, testInfo),
                () -> editConflict2(LockingStrategy.WRITE, testInfo),
                () -> editConflict2(LockingStrategy.OPTIMISTIC, testInfo),
                () -> editConflict(testInfo),
                () -> editConflict2(testInfo),
                () -> snapshotTest1(testInfo),
                () -> snapshotTest2(testInfo),
                () -> snapshotTest3(testInfo),
@@ -14,7 +14,6 @@
        <module>sync-base</module>
        <module>dhfs-fs</module>
        <module>dhfs-fuse</module>
        <module>dhfs-app</module>
        <module>kleppmanntree</module>
        <module>objects</module>
        <module>utils</module>
@@ -23,7 +22,6 @@
    <properties>
        <compiler-plugin.version>3.12.1</compiler-plugin.version>
        <!--FIXME-->
        <maven.compiler.release></maven.compiler.release>
        <maven.compiler.source>21</maven.compiler.source>
        <maven.compiler.target>21</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
@@ -88,6 +86,19 @@

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-javadoc-plugin</artifactId>
                <version>3.11.2</version>
                <configuration>
                    <additionalOptions>
                        --add-exports java.base/sun.nio.ch=ALL-UNNAMED
                        --add-exports java.base/jdk.internal.access=ALL-UNNAMED
                        --add-opens=java.base/java.nio=ALL-UNNAMED
                        --enable-preview
                    </additionalOptions>
                </configuration>
            </plugin>
            <plugin>
                <groupId>${quarkus.platform.group-id}</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
@@ -19,8 +19,6 @@ import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@@ -56,27 +54,21 @@ public class AutosyncProcessor {
        if (downloadAll)
            executorService.submit(() -> {
                Log.info("Adding all to autosync");
                List<JObjectKey> objs = new LinkedList<>();
                txm.run(() -> {
                    try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
                        while (it.hasNext()) {
                            var key = it.peekNextKey();
                            objs.add(key);
                            // TODO: Nested transactions
                            txm.run(() -> {
                                var gotObj = curTx.get(JData.class, key).orElse(null);
                                if (!(gotObj instanceof RemoteObjectMeta meta))
                                    return;
                                if (!meta.hasLocalData())
                                    add(meta.key());
                            }, true);
                            it.skip();
                        }
                    }
                });

                for (var obj : objs) {
                    txm.run(() -> {
                        var gotObj = curTx.get(JData.class, obj).orElse(null);
                        if (!(gotObj instanceof RemoteObjectMeta meta))
                            return;
                        if (!meta.hasLocalData())
                            add(meta.key());
                    });
                }
                Log.info("Adding all to autosync: finished");
            });
    }

@@ -2,7 +2,7 @@ package com.usatiuk.dhfs.invalidation;

import com.usatiuk.dhfs.peersync.PeerConnectedEventListener;
import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.peersync.PeerManager;
import com.usatiuk.dhfs.peersync.ReachablePeerManager;
import com.usatiuk.utils.SerializationHelper;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
@@ -24,7 +24,7 @@ import java.nio.file.Paths;
public class DeferredInvalidationQueueService implements PeerConnectedEventListener {
    private static final String dataFileName = "invqueue";
    @Inject
    PeerManager remoteHostManager;
    ReachablePeerManager reachablePeerManager;
    @Inject
    InvalidationQueueService invalidationQueueService;
    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
@@ -63,7 +63,7 @@ public class DeferredInvalidationQueueService implements PeerConnectedEventListe
    @Scheduled(every = "15s", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
    @Blocking
    void periodicReturn() {
        for (var reachable : remoteHostManager.getAvailableHosts())
        for (var reachable : reachablePeerManager.getAvailableHosts())
            returnForHost(reachable);
    }
@@ -1,13 +1,14 @@
package com.usatiuk.dhfs.invalidation;

import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import org.pcollections.PMap;

import java.util.Collection;
import java.util.List;

public record IndexUpdateOp(JObjectKey key, PMap<PeerId, Long> changelog) implements Op {
public record IndexUpdateOp(JObjectKey key, PMap<PeerId, Long> changelog, JDataRemoteDto data) implements Op {
    @Override
    public Collection<JObjectKey> getEscapedRefs() {
        return List.of(key);
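The added data component lets an index update optionally carry the object body itself, so peers receiving the op do not need a separate fetch; passing null keeps the old metadata-only behavior. Schematically (the key, changelog, and dto values are placeholders):

    // Metadata-only invalidation, as before:
    var op = new IndexUpdateOp(key, changelog, null);

    // Push-style update that also ships the serialized object:
    var pushOp = new IndexUpdateOp(key, changelog, dto);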
@@ -2,7 +2,7 @@ package com.usatiuk.dhfs.invalidation;
|
||||
|
||||
import com.usatiuk.dhfs.peersync.PeerId;
|
||||
import com.usatiuk.dhfs.peersync.PeerInfoService;
|
||||
import com.usatiuk.dhfs.peersync.PeerManager;
|
||||
import com.usatiuk.dhfs.peersync.ReachablePeerManager;
|
||||
import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
|
||||
import com.usatiuk.dhfs.rpc.RemoteObjectServiceClient;
|
||||
import com.usatiuk.objects.JData;
|
||||
@@ -37,7 +37,7 @@ public class InvalidationQueueService {
|
||||
private final AtomicReference<ConcurrentHashSet<JObjectKey>> _toAllQueue = new AtomicReference<>(new ConcurrentHashSet<>());
|
||||
private final DataLocker _locker = new DataLocker();
|
||||
@Inject
|
||||
PeerManager remoteHostManager;
|
||||
ReachablePeerManager reachablePeerManager;
|
||||
@Inject
|
||||
DeferredInvalidationQueueService deferredInvalidationQueueService;
|
||||
@Inject
|
||||
@@ -103,7 +103,7 @@ public class InvalidationQueueService {
|
||||
}
|
||||
|
||||
if (toAllQueue != null) {
|
||||
var hostInfo = remoteHostManager.getHostStateSnapshot();
|
||||
var hostInfo = reachablePeerManager.getHostStateSnapshot();
|
||||
for (var o : toAllQueue) {
|
||||
for (var h : hostInfo.available())
|
||||
_queue.add(new InvalidationQueueEntry(h, o));
|
||||
@@ -129,7 +129,7 @@ public class InvalidationQueueService {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!remoteHostManager.isReachable(e.peer())) {
|
||||
if (!reachablePeerManager.isReachable(e.peer())) {
|
||||
deferredInvalidationQueueService.defer(e);
|
||||
continue;
|
||||
}
|
||||
@@ -210,14 +210,14 @@ public class InvalidationQueueService {
|
||||
}
|
||||
|
||||
void pushInvalidationToOne(InvalidationQueueEntry entry) {
|
||||
if (remoteHostManager.isReachable(entry.peer()))
|
||||
if (reachablePeerManager.isReachable(entry.peer()))
|
||||
_queue.add(entry);
|
||||
else
|
||||
deferredInvalidationQueueService.defer(entry);
|
||||
}
|
||||
|
||||
void pushInvalidationToOneNoDelay(InvalidationQueueEntry entry) {
|
||||
if (remoteHostManager.isReachable(entry.peer()))
|
||||
if (reachablePeerManager.isReachable(entry.peer()))
|
||||
_queue.addNoDelay(entry);
|
||||
else
|
||||
deferredInvalidationQueueService.defer(entry);
|
||||
|
RemoteObjectMetaOpExtractor.java
@@ -1,10 +1,14 @@
 package com.usatiuk.dhfs.invalidation;
 
 import com.usatiuk.dhfs.peersync.PeerId;
+import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
+import com.usatiuk.dhfs.remoteobj.JDataRemotePush;
 import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
 import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
+import com.usatiuk.dhfs.syncmap.DtoMapperService;
 import com.usatiuk.objects.transaction.Transaction;
 import com.usatiuk.objects.transaction.TransactionManager;
+import io.quarkus.logging.Log;
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.inject.Inject;
 import org.apache.commons.lang3.tuple.Pair;
@@ -19,11 +23,22 @@ public class RemoteObjectMetaOpExtractor implements OpExtractor<RemoteObjectMeta
     Transaction curTx;
     @Inject
     RemoteTransaction remoteTransaction;
+    @Inject
+    DtoMapperService dtoMapperService;
 
     @Override
     public Pair<List<Op>, Runnable> extractOps(RemoteObjectMeta data, PeerId peerId) {
         return txm.run(() -> {
-            return Pair.of(List.of(new IndexUpdateOp(data.key(), data.changelog())), () -> {
+            JDataRemoteDto dto =
+                    data.knownType().isAnnotationPresent(JDataRemotePush.class)
+                            ? remoteTransaction.getData(data.knownType(), data.key())
+                            .map(d -> dtoMapperService.toDto(d, d.dtoClass())).orElse(null)
+                            : null;
+
+            if (data.knownType().isAnnotationPresent(JDataRemotePush.class) && dto == null) {
+                Log.warnv("Failed to get data for push {0} of type {1}", data.key(), data.knownType());
+            }
+            return Pair.of(List.of(new IndexUpdateOp(data.key(), data.changelog(), dto)), () -> {
            });
        });
    }
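
Note: IndexUpdateOp gains an optional payload. For object types annotated with @JDataRemotePush, the extractor maps the current data to its DTO and ships it inside the op, so the receiving peer does not need a follow-up fetch; if the data cannot be loaded, it logs a warning and sends a null DTO. A hypothetical opt-in type could look like this (SmallConfig and its fields are illustrative, not part of the diff):

    // Illustrative only: a small remote object that opts in to push replication.
    @JDataRemotePush
    public record SmallConfig(JObjectKey key, String value)
            implements JDataRemote, JDataRemoteDto {
    }
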
JKleppmannTreeManager.java
@@ -10,7 +10,6 @@ import com.usatiuk.dhfs.peersync.PeerInfoService;
 import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
 import com.usatiuk.kleppmanntree.*;
 import com.usatiuk.objects.JObjectKey;
-import com.usatiuk.objects.transaction.LockingStrategy;
 import com.usatiuk.objects.transaction.Transaction;
 import com.usatiuk.objects.transaction.TransactionManager;
 import io.quarkus.logging.Log;
@@ -39,9 +38,9 @@ public class JKleppmannTreeManager {
     @Inject
     PersistentPeerDataService persistentPeerDataService;
 
-    public JKleppmannTree getTree(JObjectKey name, LockingStrategy lockingStrategy, Supplier<JKleppmannTreeNodeMeta> rootNodeSupplier) {
+    public JKleppmannTree getTree(JObjectKey name, Supplier<JKleppmannTreeNodeMeta> rootNodeSupplier) {
         return txManager.executeTx(() -> {
-            var data = curTx.get(JKleppmannTreePersistentData.class, name, lockingStrategy).orElse(null);
+            var data = curTx.get(JKleppmannTreePersistentData.class, name).orElse(null);
             if (data == null) {
                 data = new JKleppmannTreePersistentData(
                         name,
@@ -66,18 +65,11 @@ public class JKleppmannTreeManager {
     }
 
     public Optional<JKleppmannTree> getTree(JObjectKey name) {
-        return getTree(name, LockingStrategy.WRITE);
-    }
-
-    public Optional<JKleppmannTree> getTree(JObjectKey name, LockingStrategy lockingStrategy) {
         return txManager.executeTx(() -> {
-            return curTx.get(JKleppmannTreePersistentData.class, name, lockingStrategy).map(JKleppmannTree::new);
+            return curTx.get(JKleppmannTreePersistentData.class, name).map(JKleppmannTree::new);
        });
    }

-    public JKleppmannTree getTree(JObjectKey name, Supplier<JKleppmannTreeNodeMeta> rootNodeSupplier) {
-        return getTree(name, LockingStrategy.WRITE, rootNodeSupplier);
-    }
-
    public class JKleppmannTree {
        private final KleppmannTree<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> _tree;
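
Note: with locking strategies removed from the transaction API (see the RemoteTransaction hunks below), the three getTree overloads collapse into two. Call sites reduce to the following sketch (the tree key is assumed):

    var tree = jKleppmannTreeManager.getTree(JObjectKey.of("some-tree"), () -> null); // get-or-create
    Optional<JKleppmannTreeManager.JKleppmannTree> existing =
            jKleppmannTreeManager.getTree(JObjectKey.of("some-tree"));                // lookup only
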
JKleppmannTreePeerInterface.java
@@ -24,6 +24,6 @@ public class JKleppmannTreePeerInterface implements PeerInterface<PeerId> {
 
     @Override
     public Collection<PeerId> getAllPeers() {
-        return peerInfoService.getPeers().stream().map(PeerInfo::id).toList();
+        return peerInfoService.getSynchronizedPeers().stream().map(PeerInfo::id).toList();
    }
 }
PeerInfo.java
@@ -4,24 +4,48 @@ import com.google.protobuf.ByteString;
 import com.usatiuk.dhfs.peertrust.CertificateTools;
 import com.usatiuk.dhfs.remoteobj.JDataRemote;
 import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
+import com.usatiuk.dhfs.remoteobj.JDataRemotePush;
 import com.usatiuk.objects.JObjectKey;
+import org.pcollections.HashTreePMap;
+import org.pcollections.PMap;
 
 import java.security.cert.X509Certificate;
 
-public record PeerInfo(JObjectKey key, PeerId id, ByteString cert) implements JDataRemote, JDataRemoteDto {
+@JDataRemotePush
+public record PeerInfo(JObjectKey key, PeerId id, ByteString cert,
+                       PMap<PeerId, Long> kickCounter,
+                       long lastSeenTimestamp) implements JDataRemote, JDataRemoteDto {
     public PeerInfo(PeerId id, byte[] cert) {
-        this(id.toJObjectKey(), id, ByteString.copyFrom(cert));
+        this(id.toJObjectKey(), id, ByteString.copyFrom(cert), HashTreePMap.empty(), System.currentTimeMillis());
     }
 
     public X509Certificate parsedCert() {
         return CertificateTools.certFromBytes(cert.toByteArray());
     }
 
+    public PeerInfo withKickCounter(PMap<PeerId, Long> kickCounter) {
+        return new PeerInfo(key, id, cert, kickCounter, lastSeenTimestamp);
+    }
+
+    public PeerInfo withIncrementedKickCounter(PeerId peerId) {
+        return new PeerInfo(key, id, cert, kickCounter.plus(peerId, kickCounter.getOrDefault(peerId, 0L) + 1), lastSeenTimestamp);
+    }
+
+    public PeerInfo withLastSeenTimestamp(long lastSeenTimestamp) {
+        return new PeerInfo(key, id, cert, kickCounter, lastSeenTimestamp);
+    }
+
+    public long kickCounterSum() {
+        return kickCounter.values().stream().mapToLong(Long::longValue).sum();
+    }
+
     @Override
     public String toString() {
         return "PeerInfo{" +
                 "key=" + key +
                 ", id=" + id +
+                ", kickCounter=" + kickCounter +
+                ", lastSeenTimestamp=" + lastSeenTimestamp +
                '}';
    }
 }
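
Note: PeerInfo gains two replicated fields, a per-peer kick counter and a last-seen timestamp, and is now pushed with index updates via @JDataRemotePush. Both fields only ever grow, and the sync handler below merges them by per-key maximum, which is commutative and idempotent, so replicas converge no matter the exchange order. A minimal sketch of that merge rule (the method name is illustrative; the real loop is in PeerInfoSyncHandler):

    // Sketch: per-key max-merge, the convergence rule behind kickCounter.
    static PMap<PeerId, Long> merge(PMap<PeerId, Long> ours, PMap<PeerId, Long> theirs) {
        var result = ours;
        for (var e : theirs.entrySet()) // keep the larger count for every peer
            result = result.plus(e.getKey(),
                    Long.max(result.getOrDefault(e.getKey(), 0L), e.getValue()));
        return result;
    }
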
PeerInfoService.java
@@ -5,7 +5,6 @@ import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
 import com.usatiuk.dhfs.peersync.structs.JKleppmannTreeNodeMetaPeer;
 import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
 import com.usatiuk.objects.JObjectKey;
-import com.usatiuk.objects.transaction.LockingStrategy;
 import com.usatiuk.objects.transaction.Transaction;
 import com.usatiuk.objects.transaction.TransactionManager;
 import io.quarkus.logging.Log;
@@ -29,14 +28,10 @@ public class PeerInfoService {
     @Inject
     RemoteTransaction remoteTx;
 
-    private JKleppmannTreeManager.JKleppmannTree getTreeW() {
+    private JKleppmannTreeManager.JKleppmannTree getTree() {
         return jKleppmannTreeManager.getTree(TREE_KEY, () -> null);
     }
 
-    private JKleppmannTreeManager.JKleppmannTree getTreeR() {
-        return jKleppmannTreeManager.getTree(TREE_KEY, LockingStrategy.OPTIMISTIC, () -> null);
-    }
-
     public Optional<PeerInfo> getPeerInfoImpl(JObjectKey key) {
         return jObjectTxManager.run(() -> {
             return curTx.get(JKleppmannTreeNodeHolder.class, key).map(JKleppmannTreeNodeHolder::node).flatMap(node -> {
@@ -49,7 +44,7 @@ public class PeerInfoService {
 
     public boolean existsPeer(PeerId peer) {
         return jObjectTxManager.run(() -> {
-            var gotKey = getTreeR().traverse(List.of(JKleppmannTreeNodeMetaPeer.peerIdToNodeId(peer).value()));
+            var gotKey = getTree().traverse(List.of(JKleppmannTreeNodeMetaPeer.peerIdToNodeId(peer).value()));
             if (gotKey == null) {
                 return false;
             }
@@ -59,7 +54,7 @@ public class PeerInfoService {
 
     public Optional<PeerInfo> getPeerInfo(PeerId peer) {
         return jObjectTxManager.run(() -> {
-            var gotKey = getTreeR().traverse(List.of(JKleppmannTreeNodeMetaPeer.peerIdToNodeId(peer).value()));
+            var gotKey = getTree().traverse(List.of(JKleppmannTreeNodeMetaPeer.peerIdToNodeId(peer).value()));
             if (gotKey == null) {
                 return Optional.empty();
             }
@@ -72,7 +67,7 @@ public class PeerInfoService {
 
     public List<PeerInfo> getPeers() {
         return jObjectTxManager.run(() -> {
-            var gotKey = getTreeR().traverse(List.of());
+            var gotKey = getTree().traverse(List.of());
             return curTx.get(JKleppmannTreeNodeHolder.class, gotKey).map(JKleppmannTreeNodeHolder::node).map(
                     node -> node.children().keySet().stream()
                             .map(JObjectKey::of).map(this::getPeerInfoImpl)
@@ -87,32 +82,42 @@ public class PeerInfoService {
 
     public List<PeerInfo> getPeersNoSelf() {
         return jObjectTxManager.run(() -> {
-            var gotKey = getTreeR().traverse(List.of());
-            return curTx.get(JKleppmannTreeNodeHolder.class, gotKey).map(JKleppmannTreeNodeHolder::node).map(
-                    node -> node.children().keySet().stream()
-                            .map(JObjectKey::of).map(this::getPeerInfoImpl)
-                            .filter(o -> {
-                                if (o.isEmpty())
-                                    Log.warnv("Could not get peer info for peer {0}", o);
-                                return o.isPresent();
-                            }).map(Optional::get).filter(
-                                    peerInfo -> !peerInfo.id().equals(persistentPeerDataService.getSelfUuid())).toList())
-                    .orElseThrow();
+            return getPeers().stream().filter(
+                    peerInfo -> !peerInfo.id().equals(persistentPeerDataService.getSelfUuid())).toList();
         });
     }
 
+    public List<PeerInfo> getSynchronizedPeers() {
+        return jObjectTxManager.run(() -> {
+            return getPeers().stream().filter(pi -> {
+                if (pi.id().equals(persistentPeerDataService.getSelfUuid())) {
+                    return true;
+                }
+                return persistentPeerDataService.isInitialSyncDone(pi.id());
+            }).toList();
+        });
+    }
+
+    public List<PeerInfo> getSynchronizedPeersNoSelf() {
+        return jObjectTxManager.run(() -> {
+            return getPeersNoSelf().stream().filter(pi -> {
+                return persistentPeerDataService.isInitialSyncDone(pi.id());
+            }).toList();
+        });
+    }
+
     public void putPeer(PeerId id, byte[] cert) {
         jObjectTxManager.run(() -> {
-            var parent = getTreeW().traverse(List.of());
+            var parent = getTree().traverse(List.of());
             var newPeerInfo = new PeerInfo(id, cert);
             remoteTx.putData(newPeerInfo);
-            getTreeW().move(parent, new JKleppmannTreeNodeMetaPeer(newPeerInfo.id()), JKleppmannTreeNodeMetaPeer.peerIdToNodeId(newPeerInfo.id()));
+            getTree().move(parent, new JKleppmannTreeNodeMetaPeer(newPeerInfo.id()), JKleppmannTreeNodeMetaPeer.peerIdToNodeId(newPeerInfo.id()));
         });
     }
 
     public void removePeer(PeerId id) {
         jObjectTxManager.run(() -> {
-            var gotKey = getTreeR().traverse(List.of(JKleppmannTreeNodeMetaPeer.peerIdToNodeId(id).value()));
+            var gotKey = getTree().traverse(List.of(JKleppmannTreeNodeMetaPeer.peerIdToNodeId(id).value()));
             if (gotKey == null) {
                 return;
             }
@@ -121,7 +126,7 @@ public class PeerInfoService {
                 Log.warn("Peer " + id + " not found in the tree");
                 return;
             }
-            getTreeW().trash(node.meta(), node.key());
+            getTree().trash(node.meta(), node.key());
            curTx.onCommit(persistentPeerDataService::updateCerts);
        });
    }
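
Note: the read/write accessor split (getTreeR/getTreeW) collapses into one getTree(), getPeersNoSelf() is rewritten on top of getPeers(), and two new views are introduced. "Synchronized" here means self plus every peer whose initial sync completed; as a sketch:

    // Sketch: the filter behind the two new accessors.
    var self = persistentPeerDataService.getSelfUuid();
    List<PeerInfo> synced = peerInfoService.getPeers().stream()
            .filter(pi -> pi.id().equals(self)
                    || persistentPeerDataService.isInitialSyncDone(pi.id()))
            .toList();
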
PeerInfoSyncHandler.java
@@ -89,10 +89,11 @@ public class PeerInfoSyncHandler implements ObjSyncHandler<PeerInfo, PeerInfo> {
             if (oursCurData == null)
                 throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
 
-            if (!receivedData.equals(oursCurData))
-                throw new StatusRuntimeException(Status.ABORTED.withDescription("PeerInfo data conflict"));
+            if (!receivedData.cert().equals(oursCurData.cert()))
+                throw new StatusRuntimeException(Status.ABORTED.withDescription("PeerInfo certificate conflict for " + key));
 
             HashPMap<PeerId, Long> newChangelog = HashTreePMap.from(current.changelog());
+            HashPMap<PeerId, Long> newKickCounter = HashTreePMap.from(oursCurData.kickCounter());
 
             for (var entry : receivedChangelog.entrySet()) {
                 newChangelog = newChangelog.plus(entry.getKey(),
@@ -100,6 +101,20 @@ public class PeerInfoSyncHandler implements ObjSyncHandler<PeerInfo, PeerInfo> {
                 );
             }
 
+            for (var entry : receivedData.kickCounter().entrySet()) {
+                newKickCounter = newKickCounter.plus(entry.getKey(),
+                        Long.max(newKickCounter.getOrDefault(entry.getKey(), 0L), entry.getValue())
+                );
+            }
+
+            var newData = oursCurData.withKickCounter(newKickCounter)
+                    .withLastSeenTimestamp(Math.max(oursCurData.lastSeenTimestamp(), receivedData.lastSeenTimestamp()));
+
+            if (!newData.equals(oursCurData))
+                newChangelog = newChangelog.plus(persistentPeerDataService.getSelfUuid(), newChangelog.getOrDefault(persistentPeerDataService.getSelfUuid(), 0L) + 1L);
+
+            remoteTx.putDataRaw(newData);
+
            current = current.withChangelog(newChangelog);
        }
    }
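
Note: conflict handling is deliberately narrowed: only a certificate mismatch still aborts; kick counters and timestamps are merged instead, and the local changelog entry is bumped only when the merge actually changed our copy. A worked example with illustrative values (assuming two PeerId values a and b):

    PMap<PeerId, Long> ours   = HashTreePMap.<PeerId, Long>empty().plus(a, 2L).plus(b, 1L);
    PMap<PeerId, Long> theirs = HashTreePMap.<PeerId, Long>empty().plus(a, 1L).plus(b, 3L);
    // Per-key max-merge yields {a=2, b=3}; lastSeen merges as Math.max(1000, 1500) = 1500.
    // The merged record differs from ours, so our own changelog entry is incremented
    // before remoteTx.putDataRaw(newData) stores it.
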
PeerLastSeenUpdater.java (new file)
@@ -0,0 +1,60 @@
+package com.usatiuk.dhfs.peersync;
+
+import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
+import com.usatiuk.objects.transaction.Transaction;
+import com.usatiuk.objects.transaction.TransactionManager;
+import io.quarkus.scheduler.Scheduled;
+import io.smallrye.common.annotation.Blocking;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+import org.eclipse.microprofile.config.inject.ConfigProperty;
+
+@ApplicationScoped
+public class PeerLastSeenUpdater {
+    @Inject
+    ReachablePeerManager reachablePeerManager;
+    @Inject
+    PeerInfoService peerInfoService;
+    @Inject
+    Transaction curTx;
+    @Inject
+    TransactionManager txm;
+    @Inject
+    RemoteTransaction remoteTransaction;
+
+    @ConfigProperty(name = "dhfs.objects.last-seen.timeout")
+    long lastSeenTimeout;
+    @Inject
+    PersistentPeerDataService persistentPeerDataService;
+
+    @Scheduled(every = "${dhfs.objects.last-seen.update}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
+    @Blocking
+    void update() {
+        var snapshot = reachablePeerManager.getHostStateSnapshot();
+        for (var a : snapshot.available()) {
+            txm.run(() -> {
+                var curInfo = remoteTransaction.getData(PeerInfo.class, a.id()).orElse(null);
+                if (curInfo == null) return;
+
+                var newInfo = curInfo.withLastSeenTimestamp(System.currentTimeMillis());
+                remoteTransaction.putData(newInfo);
+            });
+        }
+
+        for (var u : snapshot.unavailable()) {
+            txm.run(() -> {
+                if (!persistentPeerDataService.isInitialSyncDone(u))
+                    return;
+
+                var curInfo = remoteTransaction.getData(PeerInfo.class, u.id()).orElse(null);
+                if (curInfo == null) return;
+
+                var lastSeen = curInfo.lastSeenTimestamp();
+                if (System.currentTimeMillis() - lastSeen > (lastSeenTimeout * 1000)) {
+                    var kicked = curInfo.withIncrementedKickCounter(persistentPeerDataService.getSelfUuid());
+                    remoteTransaction.putData(kicked);
+                }
+            });
+        }
+    }
+}
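
Note: this new scheduled job is the producer for the two PeerInfo fields added above: on every dhfs.objects.last-seen.update tick it stamps lastSeenTimestamp for reachable peers, and for unreachable peers that completed initial sync it increments its own entry in their kick counter once the timeout elapses. Mind the units: the timeout is configured in seconds while timestamps are milliseconds, hence the * 1000. The kick condition, restated:

    // Sketch: the condition from update() above, with explicit units.
    boolean shouldKick(long nowMs, long lastSeenMs, long lastSeenTimeoutSec) {
        return nowMs - lastSeenMs > lastSeenTimeoutSec * 1000;
    }
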
PersistentPeerDataService.java
@@ -51,6 +51,8 @@ public class PersistentPeerDataService {
     PeerInfoService peerInfoService;
     @Inject
     TransactionManager txm;
+    @Inject
+    ReachablePeerManager reachablePeerManager;
 
     @ConfigProperty(name = "dhfs.peerdiscovery.preset-uuid")
     Optional<String> presetUuid;
@@ -89,21 +91,6 @@ public class PersistentPeerDataService {
             Files.write(Path.of(stuffRoot, "self_uuid"), _selfUuid.id().toString().getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
         }
 
-    //    private void pushPeerUpdates() {
-    //        pushPeerUpdates(null);
-    //    }
-
-    //    private void pushPeerUpdates(@Nullable JObject<?> obj) {
-    //        if (obj != null)
-    //            Log.info("Scheduling certificate update after " + obj.getMeta().getName() + " was updated");
-    //        executorService.submit(() -> {
-    //            updateCerts();
-    //            invalidationQueueService.pushInvalidationToAll(PeerDirectory.PeerDirectoryObjName);
-    //            for (var p : peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList()))
-    //                invalidationQueueService.pushInvalidationToAll(PersistentPeerInfo.getNameFromUuid(p));
-    //        });
-    //    }
-
     public PeerId getSelfUuid() {
         return _selfUuid;
     }
@@ -148,6 +135,7 @@ public class PersistentPeerDataService {
             }
             curTx.put(data.withInitialSyncDone(data.initialSyncDone().minus(peerId)));
             Log.infov("Did reset sync state for {0}", peerId);
+            curTx.onCommit(() -> reachablePeerManager.handleConnectionError(peerId));
             return true;
         });
     }
@@ -160,7 +148,6 @@ public class PersistentPeerDataService {
         });
     }
 
-
     public List<IpPeerAddress> getPersistentPeerAddresses() {
        return txm.run(() -> {
            var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null);
ReachablePeerManager.java (class renamed from PeerManager)
@@ -30,9 +30,8 @@ import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 @ApplicationScoped
-public class PeerManager {
+public class ReachablePeerManager {
     private final ConcurrentMap<PeerId, PeerAddress> _states = new ConcurrentHashMap<>();
-    // FIXME: Ideally not call them on every ping
     private final Collection<PeerConnectedEventListener> _connectedListeners;
     private final Collection<PeerDisconnectedEventListener> _disconnectedListeners;
     @Inject
@@ -59,7 +58,7 @@ public class PeerManager {
     SyncHandler syncHandler;
     private ExecutorService _heartbeatExecutor;
 
-    public PeerManager(Instance<PeerConnectedEventListener> connectedListeners, Instance<PeerDisconnectedEventListener> disconnectedListeners) {
+    public ReachablePeerManager(Instance<PeerConnectedEventListener> connectedListeners, Instance<PeerDisconnectedEventListener> disconnectedListeners) {
         _connectedListeners = List.copyOf(connectedListeners.stream().toList());
         _disconnectedListeners = List.copyOf(disconnectedListeners.stream().toList());
     }
@@ -125,19 +124,23 @@ public class PeerManager {
         }
     }
 
-    public void handleConnectionError(com.usatiuk.dhfs.peersync.PeerInfo host) {
+    public void handleConnectionError(PeerId host) {
         boolean wasReachable = isReachable(host);
 
         if (wasReachable)
             Log.infov("Lost connection to {0}", host);
 
-        _states.remove(host.id());
+        _states.remove(host);
 
         for (var l : _disconnectedListeners) {
-            l.handlePeerDisconnected(host.id());
+            l.handlePeerDisconnected(host);
         }
     }
 
+    public void handleConnectionError(com.usatiuk.dhfs.peersync.PeerInfo host) {
+        handleConnectionError(host.id());
+    }
+
    // FIXME:
    private boolean pingCheck(com.usatiuk.dhfs.peersync.PeerInfo host, PeerAddress address) {
        try {
PeerInfoCertUpdateTxHook.java
@@ -1,7 +1,6 @@
 package com.usatiuk.dhfs.peertrust;
 
 import com.usatiuk.dhfs.invalidation.InvalidationQueueService;
-import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
 import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
 import com.usatiuk.dhfs.peersync.PeerInfo;
 import com.usatiuk.dhfs.peersync.PeerInfoService;
@@ -43,7 +42,7 @@ public class PeerInfoCertUpdateTxHook implements PreCommitTxHook {
             for (var curRef : oldNode.node().children().entrySet()) {
                 if (!n.node().children().containsKey(curRef.getKey())) {
                     Log.infov("Will reset sync state for {0}", curRef.getValue());
-                    curTx.onCommit(() -> persistentPeerDataService.resetInitialSyncDone(JKleppmannTreeNodeMetaPeer.nodeIdToPeerId(curRef.getValue())));
+                    persistentPeerDataService.resetInitialSyncDone(JKleppmannTreeNodeMetaPeer.nodeIdToPeerId(curRef.getValue()));
                 }
             }
             return;
@@ -54,9 +53,16 @@ public class PeerInfoCertUpdateTxHook implements PreCommitTxHook {
             return;
         }
 
-        if (!(remote.data() instanceof PeerInfo))
+        if (!(remote.data() instanceof PeerInfo curPi))
             return;
 
+        var oldPi = (PeerInfo) ((RemoteObjectDataWrapper) old).data();
+
+        if (oldPi.kickCounterSum() != curPi.kickCounterSum()) {
+            Log.warnv("Peer kicked out: {0} to {1}", key, cur);
+            persistentPeerDataService.resetInitialSyncDone(curPi.id());
+        }
+
        Log.infov("Changed peer info: {0} to {1}", key, cur);

        curTx.onCommit(() -> invalidationQueueService.pushInvalidationToAll(key));
IndexUpdateOpHandler.java
@@ -20,7 +20,7 @@ public class IndexUpdateOpHandler implements OpHandler<IndexUpdateOp> {
     @Override
     public void handleOp(PeerId from, IndexUpdateOp op) {
         txm.run(() -> {
-            syncHandler.handleRemoteUpdate(from, op.key(), op.changelog(), null);
+            syncHandler.handleRemoteUpdate(from, op.key(), op.changelog(), op.data());
        });
    }
 }
JDataRemotePush.java (new file)
@@ -0,0 +1,11 @@
+package com.usatiuk.dhfs.remoteobj;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface JDataRemotePush {
+}
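
Note: the annotation is a plain runtime-retained marker with no members; it is consumed reflectively at op-extraction time, as in RemoteObjectMetaOpExtractor above:

    boolean push = data.knownType().isAnnotationPresent(JDataRemotePush.class);
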
RemoteObjectDeleter.java
@@ -132,7 +132,7 @@ public class RemoteObjectDeleter {
             return true;
         }
 
-        var knownHosts = peerInfoService.getPeersNoSelf();
+        var knownHosts = peerInfoService.getSynchronizedPeersNoSelf();
         RemoteObjectMeta finalTarget = target;
         List<PeerId> missing = knownHosts.stream()
                 .map(PeerInfo::id)
@@ -187,7 +187,7 @@ public class RemoteObjectDeleter {
         if (!obj.seen())
             return true;
 
-        var knownHosts = peerInfoService.getPeersNoSelf();
+        var knownHosts = peerInfoService.getSynchronizedPeersNoSelf();
        boolean missing = false;
        for (var x : knownHosts) {
            if (!obj.confirmedDeletes().contains(x.id())) {
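
Note: deletion bookkeeping now consults only synchronized peers, presumably so a newly added peer that never finished initial sync cannot hold back garbage collection indefinitely. The readiness check implied by the second hunk, as a sketch:

    var knownHosts = peerInfoService.getSynchronizedPeersNoSelf();
    boolean deletable = knownHosts.stream()
            .allMatch(h -> obj.confirmedDeletes().contains(h.id()));
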
RemoteTransaction.java
@@ -3,7 +3,6 @@ package com.usatiuk.dhfs.remoteobj;
 import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
 import com.usatiuk.dhfs.rpc.RemoteObjectServiceClient;
 import com.usatiuk.objects.JObjectKey;
-import com.usatiuk.objects.transaction.LockingStrategy;
 import com.usatiuk.objects.transaction.Transaction;
 import io.quarkus.logging.Log;
 import jakarta.inject.Inject;
@@ -55,11 +54,11 @@ public class RemoteTransaction {
     }
 
     @SuppressWarnings("unchecked")
-    private <T extends JDataRemote> Optional<T> getData(Class<T> type, JObjectKey key, LockingStrategy strategy, boolean tryRequest) {
-        return curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(key), strategy)
+    private <T extends JDataRemote> Optional<T> getData(Class<T> type, JObjectKey key, boolean tryRequest) {
+        return curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(key))
                 .flatMap(obj -> {
                     if (obj.hasLocalData()) {
-                        var realData = curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(key), strategy).orElse(null);
+                        var realData = curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(key)).orElse(null);
                         if (realData == null)
                             throw new IllegalStateException("Local data not found for " + key); // TODO: Race
                         if (!type.isInstance(realData.data()))
@@ -72,8 +71,8 @@ public class RemoteTransaction {
                 });
     }
 
-    public Optional<RemoteObjectMeta> getMeta(JObjectKey key, LockingStrategy strategy) {
-        return curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(key), strategy);
+    public Optional<RemoteObjectMeta> getMeta(JObjectKey key) {
+        return curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(key));
     }
 
     public <T extends JDataRemote> void putDataRaw(T obj) {
@@ -127,23 +126,12 @@ public class RemoteTransaction {
         curTx.put(newData);
     }
 
-    public Optional<RemoteObjectMeta> getMeta(JObjectKey key) {
-        return getMeta(key, LockingStrategy.OPTIMISTIC);
-    }
-
     public <T extends JDataRemote> Optional<T> getData(Class<T> type, JObjectKey key) {
-        return getData(type, key, LockingStrategy.OPTIMISTIC, true);
+        return getData(type, key, true);
     }
 
     public <T extends JDataRemote> Optional<T> getDataLocal(Class<T> type, JObjectKey key) {
-        return getData(type, key, LockingStrategy.OPTIMISTIC, false);
+        return getData(type, key, false);
    }
-
-    public <T extends JDataRemote> Optional<T> getData(Class<T> type, JObjectKey key, LockingStrategy strategy) {
-        return getData(type, key, strategy, true);
-    }
-
-    public <T extends JDataRemote> Optional<T> getDataLocal(Class<T> type, JObjectKey key, LockingStrategy strategy) {
-        return getData(type, key, strategy, false);
-    }
 }
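
Note: the LockingStrategy parameter is plumbed out of the whole remote-object read path, and the strategy-taking overloads are deleted outright rather than deprecated. Call sites shrink accordingly (sketch):

    // was: remoteTx.getData(PeerInfo.class, key, LockingStrategy.OPTIMISTIC)
    Optional<PeerInfo> pi = remoteTx.getData(PeerInfo.class, key);
    // was: remoteTx.getMeta(key, LockingStrategy.OPTIMISTIC)
    Optional<RemoteObjectMeta> meta = remoteTx.getMeta(key);
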
SyncHandler.java
@@ -23,7 +23,10 @@ import org.pcollections.PMap;
 
 import javax.annotation.Nullable;
 import java.lang.reflect.ParameterizedType;
-import java.util.*;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
 import java.util.stream.Stream;
 
 @ApplicationScoped
@@ -124,52 +127,42 @@ public class SyncHandler {
     public void resyncAfterCrash(@Observes @Priority(100000) StartupEvent event) {
         if (shutdownChecker.lastShutdownClean())
             return;
-        List<JObjectKey> objs = new LinkedList<>();
         txm.run(() -> {
             try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
                 while (it.hasNext()) {
                     var key = it.peekNextKey();
-                    objs.add(key);
+                    // TODO: Nested transactions
+                    txm.run(() -> {
+                        var proc = curTx.get(JData.class, key).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
+                        if (proc != null) {
+                            proc.handleCrash(key);
+                        }
+                        Log.infov("Handled crash of {0}", key);
+
+                    }, true);
                     it.skip();
                 }
             }
         });
-
-        for (var obj : objs) {
-            txm.run(() -> {
-                var proc = curTx.get(JData.class, obj).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
-                if (proc != null) {
-                    proc.handleCrash(obj);
-                }
-                Log.infov("Handled crash of {0}", obj);
-            });
-        }
     }
 
     public void doInitialSync(PeerId peer) {
-        List<JObjectKey> objs = new LinkedList<>();
         txm.run(() -> {
             Log.tracev("Will do initial sync for {0}", peer);
             try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
                 while (it.hasNext()) {
                     var key = it.peekNextKey();
-                    objs.add(key);
+                    // TODO: Nested transactions
+                    txm.run(() -> {
+                        var proc = curTx.get(JData.class, key).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
+                        if (proc != null) {
+                            proc.prepareForInitialSync(peer, key);
+                        }
+                        Log.infov("Adding to initial sync for peer {0}: {1}", peer, key);
+                        invalidationQueueService.pushInvalidationToOne(peer, key);
+
+                    }, true);
                     it.skip();
                 }
             }
         });
-
-        for (var obj : objs) {
-            txm.run(() -> {
-                var proc = curTx.get(JData.class, obj).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
-                if (proc != null) {
-                    proc.prepareForInitialSync(peer, obj);
-                }
-                Log.infov("Adding to initial sync for peer {0}: {1}", peer, obj);
-                invalidationQueueService.pushInvalidationToOne(peer, obj);
-            });
-        }
    }
 }
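
Note: both scans previously collected every key into a LinkedList in one transaction and then reprocessed each key in a separate transaction; they now do the per-object work in a nested transaction directly inside the iterator loop, which is also why the import hunk drops java.util.* (the list is gone). The shared pattern, extracted as a sketch (the boolean on txm.run is assumed to mean "nested", per the TODO in the source):

    txm.run(() -> {
        try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
            while (it.hasNext()) {
                var key = it.peekNextKey();
                txm.run(() -> {
                    // per-object work against `key` goes here
                }, true);
                it.skip();
            }
        }
    });
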
@@ -4,10 +4,9 @@ import com.usatiuk.dhfs.ProtoSerializer;
|
||||
import com.usatiuk.dhfs.invalidation.InvalidationQueueService;
|
||||
import com.usatiuk.dhfs.invalidation.Op;
|
||||
import com.usatiuk.dhfs.peersync.PeerId;
|
||||
import com.usatiuk.dhfs.peersync.PeerManager;
|
||||
import com.usatiuk.dhfs.peersync.ReachablePeerManager;
|
||||
import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
|
||||
import com.usatiuk.dhfs.persistence.JObjectKeyP;
|
||||
import com.usatiuk.dhfs.refcount.JDataRef;
|
||||
import com.usatiuk.dhfs.remoteobj.ReceivedObject;
|
||||
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
|
||||
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
|
||||
@@ -52,7 +51,7 @@ public class RemoteObjectServiceClient {
|
||||
@Inject
|
||||
ProtoSerializer<GetObjectReply, ReceivedObject> receivedObjectProtoSerializer;
|
||||
@Inject
|
||||
PeerManager peerManager;
|
||||
ReachablePeerManager reachablePeerManager;
|
||||
|
||||
public Pair<PeerId, ReceivedObject> getSpecificObject(JObjectKey key, PeerId peerId) {
|
||||
return rpcClientFactory.withObjSyncClient(peerId, (peer, client) -> {
|
||||
@@ -71,7 +70,7 @@ public class RemoteObjectServiceClient {
|
||||
|
||||
var targetVersion = objMeta.versionSum();
|
||||
var targets = objMeta.knownRemoteVersions().isEmpty()
|
||||
? peerManager.getAvailableHosts()
|
||||
? reachablePeerManager.getAvailableHosts()
|
||||
: objMeta.knownRemoteVersions().entrySet().stream()
|
||||
.filter(entry -> entry.getValue().equals(targetVersion))
|
||||
.map(Map.Entry::getKey).toList();
|
||||
|
RemoteObjectServiceServerImpl.java
@@ -5,7 +5,7 @@ import com.usatiuk.dhfs.autosync.AutosyncProcessor;
 import com.usatiuk.dhfs.invalidation.Op;
 import com.usatiuk.dhfs.invalidation.OpHandlerService;
 import com.usatiuk.dhfs.peersync.PeerId;
-import com.usatiuk.dhfs.peersync.PeerManager;
+import com.usatiuk.dhfs.peersync.ReachablePeerManager;
 import com.usatiuk.dhfs.persistence.JObjectKeyP;
 import com.usatiuk.dhfs.remoteobj.*;
 import com.usatiuk.dhfs.repository.*;
@@ -31,7 +31,7 @@ public class RemoteObjectServiceServerImpl {
     @Inject
     TransactionManager txm;
     @Inject
-    PeerManager peerManager;
+    ReachablePeerManager reachablePeerManager;
    @Inject
    Transaction curTx;
 
RpcClientFactory.java
@@ -4,7 +4,7 @@ import com.usatiuk.dhfs.peerdiscovery.IpPeerAddress;
 import com.usatiuk.dhfs.peerdiscovery.PeerAddress;
 import com.usatiuk.dhfs.peersync.PeerDisconnectedEventListener;
 import com.usatiuk.dhfs.peersync.PeerId;
-import com.usatiuk.dhfs.peersync.PeerManager;
+import com.usatiuk.dhfs.peersync.ReachablePeerManager;
 import com.usatiuk.dhfs.repository.DhfsObjectSyncGrpcGrpc;
 import io.grpc.ManagedChannel;
 import io.grpc.Status;
@@ -29,7 +29,7 @@ public class RpcClientFactory implements PeerDisconnectedEventListener {
     long syncTimeout;
 
     @Inject
-    PeerManager peerManager;
+    ReachablePeerManager reachablePeerManager;
 
     @Inject
     RpcChannelFactory rpcChannelFactory;
@@ -56,7 +56,7 @@ public class RpcClientFactory implements PeerDisconnectedEventListener {
     }
 
     public <R> R withObjSyncClient(PeerId target, ObjectSyncClientFunction<R> fn) {
-        var hostinfo = peerManager.getAddress(target);
+        var hostinfo = reachablePeerManager.getAddress(target);

        if (hostinfo == null)
            throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Not known to be reachable: " + target));
PeerManagementApi.java
@@ -2,7 +2,7 @@ package com.usatiuk.dhfs.webapi;
 
 import com.usatiuk.dhfs.peersync.PeerId;
 import com.usatiuk.dhfs.peersync.PeerInfoService;
-import com.usatiuk.dhfs.peersync.PeerManager;
+import com.usatiuk.dhfs.peersync.ReachablePeerManager;
 import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
 import jakarta.inject.Inject;
 import jakarta.ws.rs.*;
@@ -14,7 +14,7 @@ public class PeerManagementApi {
     @Inject
     PeerInfoService peerInfoService;
     @Inject
-    PeerManager peerManager;
+    ReachablePeerManager reachablePeerManager;
     @Inject
     PersistentPeerDataService persistentPeerDataService;
 
@@ -23,27 +23,27 @@ public class PeerManagementApi {
     public List<PeerInfo> knownPeers() {
         return peerInfoService.getPeers().stream().map(
                 peerInfo -> new PeerInfo(peerInfo.id().toString(), Base64.getEncoder().encodeToString(peerInfo.cert().toByteArray()),
-                        Optional.ofNullable(peerManager.getAddress(peerInfo.id())).map(Objects::toString).orElse(null))).toList();
+                        Optional.ofNullable(reachablePeerManager.getAddress(peerInfo.id())).map(Objects::toString).orElse(null))).toList();
     }
 
     @Path("known-peers/{peerId}")
     @PUT
     public void addPeer(@PathParam("peerId") String peerId, KnownPeerPut knownPeerPut) {
-        peerManager.addRemoteHost(PeerId.of(peerId), knownPeerPut.cert());
+        reachablePeerManager.addRemoteHost(PeerId.of(peerId), knownPeerPut.cert());
     }
 
     @Path("known-peers/{peerId}")
     @DELETE
     public void deletePeer(@PathParam("peerId") String peerId) {
-        peerManager.removeRemoteHost(PeerId.of(peerId));
+        reachablePeerManager.removeRemoteHost(PeerId.of(peerId));
     }
 
     @Path("available-peers")
     @GET
     public Collection<PeerInfo> availablePeers() {
-        return peerManager.getSeenButNotAddedHosts().stream()
+        return reachablePeerManager.getSeenButNotAddedHosts().stream()
                 .map(p -> new PeerInfo(p.getLeft().toString(), p.getRight().cert(),
-                        peerManager.selectBestAddress(p.getLeft()).map(Objects::toString).orElse(null)))
+                        reachablePeerManager.selectBestAddress(p.getLeft()).map(Objects::toString).orElse(null)))
                .toList();
    }
 }
PersistentPeerAddressApi.java
@@ -3,7 +3,7 @@ package com.usatiuk.dhfs.webapi;
 import com.usatiuk.dhfs.peerdiscovery.PeerAddrStringHelper;
 import com.usatiuk.dhfs.peersync.PeerId;
 import com.usatiuk.dhfs.peersync.PeerInfoService;
-import com.usatiuk.dhfs.peersync.PeerManager;
+import com.usatiuk.dhfs.peersync.ReachablePeerManager;
 import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
 import jakarta.inject.Inject;
 import jakarta.ws.rs.*;
@@ -15,7 +15,7 @@ public class PersistentPeerAddressApi {
     @Inject
     PeerInfoService peerInfoService;
     @Inject
-    PeerManager peerManager;
+    ReachablePeerManager reachablePeerManager;
    @Inject
    PersistentPeerDataService persistentPeerDataService;
 
UninitializedByteBuffer.java
@@ -17,7 +17,7 @@ public class UninitializedByteBuffer {
     );
 
     public static ByteBuffer allocate(int capacity) {
-        UnsafeAccessor.get().getNioAccess().reserveMemory(capacity, capacity);
+        UnsafeAccessor.NIO.reserveMemory(capacity, capacity);
 
         MemorySegment segment = null;
         try {
@@ -29,7 +29,7 @@ public class UninitializedByteBuffer {
         Consumer<MemorySegment> cleanup = s -> {
             try {
                 free.invokeExact(s);
-                UnsafeAccessor.get().getNioAccess().unreserveMemory(capacity, capacity);
+                UnsafeAccessor.NIO.unreserveMemory(capacity, capacity);
             } catch (Throwable e) {
                 throw new RuntimeException(e);
             }
@@ -39,6 +39,6 @@ public class UninitializedByteBuffer {
     }
 
     public static long getAddress(ByteBuffer buffer) {
-        return UnsafeAccessor.get().getNioAccess().getBufferAddress(buffer);
+        return UnsafeAccessor.NIO.getBufferAddress(buffer);
    }
 }
UnsafeAccessor.java
@@ -6,36 +6,18 @@ import sun.misc.Unsafe;
 
 import java.lang.reflect.Field;
 
-public class UnsafeAccessor {
-    private static final UnsafeAccessor INSTANCE;
+public abstract class UnsafeAccessor {
+    public static final JavaNioAccess NIO;
+    public static final Unsafe UNSAFE;
 
     static {
         try {
-            INSTANCE = new UnsafeAccessor();
+            NIO = SharedSecrets.getJavaNioAccess();
+            Field f = Unsafe.class.getDeclaredField("theUnsafe");
+            f.setAccessible(true);
+            UNSAFE = (Unsafe) f.get(null);
         } catch (Exception e) {
             throw new RuntimeException(e);
         }
     }
-
-    public static UnsafeAccessor get() {
-        return INSTANCE;
-    }
-
-    private JavaNioAccess _nioAccess;
-    private Unsafe _unsafe;
-
-    private UnsafeAccessor() throws NoSuchFieldException, IllegalAccessException {
-        _nioAccess = SharedSecrets.getJavaNioAccess();
-        Field f = Unsafe.class.getDeclaredField("theUnsafe");
-        f.setAccessible(true);
-        _unsafe = (Unsafe) f.get(null);
-    }
-
-    public JavaNioAccess getNioAccess() {
-        return _nioAccess;
-    }
-
-    public Unsafe getUnsafe() {
-        return _unsafe;
-    }
 }
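
Note: the lazily constructed singleton (get() plus instance getters) becomes an abstract holder of two static final fields initialized in a static block, removing two indirections per call. Before and after at a call site (taken from UninitializedByteBuffer above):

    // was: UnsafeAccessor.get().getNioAccess().reserveMemory(capacity, capacity);
    UnsafeAccessor.NIO.reserveMemory(capacity, capacity);
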
VoidFn.java (deleted)
@@ -1,7 +0,0 @@
-package com.usatiuk.utils;
-
-@FunctionalInterface
-public interface VoidFn {
-    void apply();
-}

VoidFnThrows.java (deleted)
@@ -1,7 +0,0 @@
-package com.usatiuk.utils;
-
-@FunctionalInterface
-public interface VoidFnThrows {
-    void apply() throws Throwable;
-}
run-wrapper run script
@@ -38,7 +38,7 @@ java \
     -Dquarkus.log.category.\"com.usatiuk\".level=INFO \
     -Dquarkus.log.category.\"com.usatiuk.dhfs\".level=INFO \
     -Ddhfs.webui.root="$SCRIPT_DIR"/Webui $EXTRAOPTS_PARSED \
-    -jar "$SCRIPT_DIR"/"DHFS Package"/quarkus-run.jar >quarkus.log 2>&1 &
+    -jar "$SCRIPT_DIR"/"Server"/quarkus-run.jar >quarkus.log 2>&1 &
 
 echo "Started $!"
48 run-wrapper/run.ps1 Normal file
@@ -0,0 +1,48 @@
+$ErrorActionPreference = 'Stop'
+
+$PIDFILE = Join-Path $PSScriptRoot ".pid"
+$EXTRAOPTS = Join-Path $PSScriptRoot "extra-opts"
+
+if (-Not (Test-Path $EXTRAOPTS)) {
+    New-Item -ItemType File -Path $EXTRAOPTS | Out-Null
+}
+
+if (Test-Path $PIDFILE) {
+    $ReadPID = Get-Content $PIDFILE
+    if (Get-Process -Id $ReadPID -ErrorAction SilentlyContinue) {
+        Write-Host "Already running: $ReadPID"
+        exit 2
+    }
+}
+
+$ExtraOptsParsed = Get-Content $EXTRAOPTS | Where-Object {$_}
+
+Write-Host "Extra options: $($ExtraOptsParsed -join ' ')"
+
+$JAVA_OPTS = @(
+    "-Xmx512M"
+    "--enable-preview"
+    "-Ddhfs.objects.writeback.limit=134217728"
+    "-Ddhfs.objects.lru.limit=134217728"
+    "--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED"
+    "--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED"
+    "--add-opens=java.base/java.nio=ALL-UNNAMED"
+    "-Ddhfs.objects.persistence.files.root=$($PSScriptRoot)\..\data\objects"
+    "-Ddhfs.objects.persistence.stuff.root=$($PSScriptRoot)\..\data\stuff"
+    "-Ddhfs.objects.persistence.lmdb.size=1000000000"
+    "-Ddhfs.fuse.root=Z:\"
+    "-Dquarkus.http.host=0.0.0.0"
+    '-Dquarkus.log.category.\"com.usatiuk\".level=INFO'
+    '-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=INFO'
+    "-Ddhfs.webui.root=$($PSScriptRoot)\Webui"
+) + $ExtraOptsParsed + @(
+    "-jar", "`"$PSScriptRoot\Server\quarkus-run.jar`""
+)
+
+$Process = Start-Process -FilePath "java" -ArgumentList $JAVA_OPTS `
+    -RedirectStandardOutput "$PSScriptRoot\quarkus.log" `
+    -RedirectStandardError "$PSScriptRoot\quarkus.log.err" `
+    -NoNewWindow -PassThru
+
+Write-Host "Started $($Process.Id)"
+$Process.Id | Out-File -FilePath $PIDFILE
Some files were not shown because too many files have changed in this diff.