Mirror of https://github.com/usatiuk/dhfs.git (synced 2025-10-28 12:37:48 +01:00)

Compare commits: 126 commits, cursed-ite ... 7784d975d7
| SHA1 | Author | Date | |
|---|---|---|---|
| 7784d975d7 | |||
| d0b45177dd | |||
| 838405fb46 | |||
| dbad8a2b22 | |||
| 66dabdef25 | |||
| 87e127bdfb | |||
| fd62543687 | |||
| 757a0bbc8a | |||
| 0c3524851e | |||
| 3eb7164c0f | |||
| f544a67fb5 | |||
| 964b3da951 | |||
| cb33472dc5 | |||
| de211bb2d2 | |||
| 56ab3bad4c | |||
| 9403556220 | |||
| 469a6b9011 | |||
| 52ccbb99bc | |||
| d972cd1562 | |||
| 80151bcca5 | |||
| 289a2b880e | |||
| 0849df60ae | |||
| 9cb5c226f9 | |||
| 87c404828c | |||
| b074e8eb44 | |||
| eb5b0ae03c | |||
| c329c1f982 | |||
| 4e7b13227b | |||
| db51d7280c | |||
| 70fecb389b | |||
| 6e9a2b25f6 | |||
| b84ef95703 | |||
| c0735801b9 | |||
| b506ced9d5 | |||
| 46bc9fa810 | |||
| 8ab034402d | |||
| d94d11ec8b | |||
| 5beaad2d32 | |||
| c4484d21e5 | |||
| 2766ef1bae | |||
| 58de85c078 | |||
| cc9da86440 | |||
| e6c9e6aee9 | |||
| 62265355c4 | |||
| 854bce1627 | |||
| 1b19c77bb6 | |||
| 7aa968a569 | |||
| e348c39be1 | |||
| 1b54830651 | |||
| bc5f0b816c | |||
| 9ff914bdaa | |||
| 1cee6f62b8 | |||
| 81703a9406 | |||
| 1757034e0b | |||
| d9765a51d8 | |||
| 99ef560b95 | |||
| f87eb365c3 | |||
| 8d3244fe64 | |||
| 0a8985c93f | |||
| a8cf483eee | |||
| f7338f4e80 | |||
| b89b182c58 | |||
| ad4ce72fdd | |||
| 26ba65fdce | |||
| 697add66d5 | |||
| a53fc5e973 | |||
| b034591091 | |||
| 07133a7186 | |||
| 8cbecf1714 | |||
| 16ba692019 | |||
| e5be1e6164 | |||
| c74fdfc5a6 | |||
| c4268ab35b | |||
| 2ab6e3c3f7 | |||
| ec8546bd69 | |||
| 6ecef94b90 | |||
| e7f22d783f | |||
| bed55162d7 | |||
| f43c6db4f0 | |||
| 56a15f4672 | |||
| 85a1fa09ab | |||
| cca0b410cf | |||
| d94abfee97 | |||
| 6bd92ad7cd | |||
| 1965d93f25 | |||
| f6685f45f9 | |||
| 060ab1767d | |||
| 89d87095c8 | |||
| 7425c1f312 | |||
| 428eca325f | |||
| 005bc35496 | |||
| 6685575ca5 | |||
| 1ae813aacd | |||
| e81671251a | |||
| add26bb156 | |||
| 4060045f15 | |||
| 75b484d5b2 | |||
| 1d9dc8ed4d | |||
| 7a85704862 | |||
| 367eedd540 | |||
| d01b9204f7 | |||
| 67fdacc3ff | |||
| 6ed9051be1 | |||
| abf95ba847 | |||
| 6a9f64439f | |||
| ceb9342b45 | |||
| ca354ba09c | |||
| 81af021292 | |||
| 0c04079258 | |||
| 2e2eb3ac97 | |||
| e2e756e7c5 | |||
| 04e932ed62 | |||
| aeec66389d | |||
| adc7356d4a | |||
| 16da05292f | |||
| b0149b7251 | |||
| 24416c1e87 | |||
| 34db870fc6 | |||
| 0e62a29ce0 | |||
| 7de5f91fd2 | |||
| ac68208b1a | |||
| 4e0675940e | |||
| 4f5f347b3c | |||
| bd5395e03f | |||
| f56f564e8b | |||
| eaa413e200 |
.github/workflows/server.yml (vendored, 69 lines changed)

@@ -20,26 +20,21 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: "recursive"

- name: Install sudo for ACT
run: apt-get update && apt-get install -y sudo
if: env.ACT=='true'

- name: Install fuse and maven
run: sudo apt-get update && sudo apt-get install -y libfuse2
- name: Install FUSE
run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3

- name: Download maven
run: |
cd "$HOME"
mkdir maven-bin
curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
echo "$HOME"/maven-bin/bin >> $GITHUB_PATH
- name: User allow other for fuse
run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf

- name: Maven info
run: |
echo $GITHUB_PATH
echo $PATH
mvn -v
- name: Dump fuse.conf
run: cat /etc/fuse.conf

- name: Set up JDK 21
uses: actions/setup-java@v4

@@ -48,8 +43,11 @@ jobs:
distribution: "zulu"
cache: maven

- name: Build LazyFS
run: cd thirdparty/lazyfs/ && ./build.sh

- name: Test with Maven
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify javadoc:aggregate

# - name: Build with Maven
# run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG

@@ -57,7 +55,12 @@ jobs:
- uses: actions/upload-artifact@v4
with:
name: DHFS Server Package
path: dhfs-parent/dhfs-app/target/quarkus-app
path: dhfs-parent/dhfs-fuse/target/quarkus-app

- uses: actions/upload-artifact@v4
with:
name: DHFS Javadocs
path: dhfs-parent/target/reports/apidocs/

- uses: actions/upload-artifact@v4
if: ${{ always() }}

@@ -214,7 +217,7 @@ jobs:
run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app

- name: Copy DHFS
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"

- name: Copy Webui
run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"

@@ -233,3 +236,37 @@ jobs:
with:
name: Run wrapper
path: ~/run-wrapper.tar.gz

publish-javadoc:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
permissions:
contents: read
pages: write
id-token: write

needs: [build-webui, build-dhfs]

steps:
- name: Checkout repository
uses: actions/checkout@v4

- uses: actions/download-artifact@v4
with:
name: DHFS Javadocs
path: dhfs-javadocs-downloaded

- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
# Upload entire repository
path: 'dhfs-javadocs-downloaded'
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
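For anyone reproducing this CI build outside of GitHub Actions, the workflow steps above translate roughly into the following shell commands. This is a sketch only: it assumes a Debian-like host with JDK 21 and Maven already on the PATH, and a checkout with submodules; the package names and the Maven invocation are taken directly from the workflow.

```bash
# Install the FUSE libraries the integration tests need (names from the workflow above)
sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
echo "user_allow_other" | sudo tee -a /etc/fuse.conf

# Build the LazyFS helper used by the tests
(cd thirdparty/lazyfs/ && ./build.sh)

# Build, test and aggregate javadocs, mirroring the "Test with Maven" step
cd dhfs-parent
mvn -T "$(nproc)" --batch-mode --update-snapshots package verify javadoc:aggregate
```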
.gitmodules (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
[submodule "thirdparty/lazyfs/lazyfs"]
path = thirdparty/lazyfs/lazyfs
url = git@github.com:dsrhaslab/lazyfs.git
@@ -14,6 +14,9 @@ Syncthing and allowing you to stream your files like Google Drive File Stream
This is a simple wrapper around the jar/web ui distribution that allows you to run/stop
the DHFS server in the background, and update itself (hopefully!)

## How to use it and how it works?
## How to use it?

TODO 😁

Unpack the run-wrapper and run the `run` script. The filesystem should be mounted to the `fuse` folder in the run-wrapper root directory.

Then, a web interface will be available at `localhost:8080`, which can be used to connect with other peers.
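A minimal session following the README instructions above might look like the sketch below. The archive name comes from the "Run wrapper" CI artifact earlier in this diff; the unpacked directory name and the exact location of the `run` script are assumptions based on the text.

```bash
# Unpack the run-wrapper artifact produced by CI (archive name from the workflow above)
tar xzf run-wrapper.tar.gz
cd dhfs   # assumed unpacked directory name

# Start the DHFS server in the background via the wrapper's run script
./run

# The distributed filesystem should now be mounted under ./fuse,
# and the peer-management web UI reachable at http://localhost:8080
ls fuse
```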
dhfs-parent/.gitignore (vendored, 2 lines changed)

@@ -41,3 +41,5 @@ nb-configuration.xml

# Plugin directory
/.quarkus/cli/plugins/

.jqwik-database
@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.app.Main" />
<module name="dhfs-app" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:8080:9011" />
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.app.Main" />
<module name="dhfs-app" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions --enable-preview -XX:+UseParallelGC -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=8080 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
@@ -1,5 +0,0 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*
dhfs-parent/dhfs-app/.gitignore (vendored, 43 lines deleted)

@@ -1,43 +0,0 @@
#Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml

# Eclipse
.project
.classpath
.settings/
bin/

# IntelliJ
.idea
*.ipr
*.iml
*.iws

# NetBeans
nb-configuration.xml

# Visual Studio Code
.vscode
.factorypath

# OSX
.DS_Store

# Vim
*.swp
*.swo

# patch
*.orig
*.rej

# Local environment
.env

# Plugin directory
/.quarkus/cli/plugins/
@@ -1,2 +0,0 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl
@@ -1,43 +0,0 @@
version: "3.2"

services:
dhfs1:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs1:/dhfs_root
- $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-jar /app/quarkus-run.jar"
ports:
- 8080:8080
- 8081:8443
- 5005:5005
dhfs2:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs2:/dhfs_root
- $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
--add-exports java.base/jdk.internal.access=ALL-UNNAMED
--add-opens=java.base/java.nio=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-jar /app/quarkus-run.jar"
ports:
- 8090:8080
- 8091:8443
- 5010:5010
@@ -1,210 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>dhfs-app</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
|
||||
<parent>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>parent</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.testcontainers</groupId>
|
||||
<artifactId>testcontainers</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.awaitility</groupId>
|
||||
<artifactId>awaitility</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bcprov-jdk18on</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bcpkix-jdk18on</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-security</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>net.openhft</groupId>
|
||||
<artifactId>zero-allocation-hashing</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-grpc</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-arc</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-rest</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-rest-client</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-rest-client-jsonb</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-rest-jsonb</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-scheduler</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-junit5</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.SerCeMan</groupId>
|
||||
<artifactId>jnr-fuse</artifactId>
|
||||
<version>44ed40f8ce</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-ffi</artifactId>
|
||||
<version>2.2.16</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-posix</artifactId>
|
||||
<version>3.1.19</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-constants</artifactId>
|
||||
<version>0.10.4</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-lang3</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.jboss.slf4j</groupId>
|
||||
<artifactId>slf4j-jboss-logmanager</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-codec</groupId>
|
||||
<artifactId>commons-codec</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-collections4</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.pcollections</groupId>
|
||||
<artifactId>pcollections</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-math3</artifactId>
|
||||
<version>3.6.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk</groupId>
|
||||
<artifactId>kleppmanntree</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>objects</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>dhfs-fs</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>dhfs-fuse</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>sync-base</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>utils</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<configuration>
|
||||
<forkCount>1C</forkCount>
|
||||
<reuseForks>false</reuseForks>
|
||||
<parallel>classes</parallel>
|
||||
<systemPropertyVariables>
|
||||
<junit.jupiter.execution.parallel.enabled>
|
||||
false
|
||||
</junit.jupiter.execution.parallel.enabled>
|
||||
</systemPropertyVariables>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-failsafe-plugin</artifactId>
|
||||
<configuration>
|
||||
<systemPropertyVariables>
|
||||
<junit.jupiter.execution.parallel.enabled>
|
||||
true
|
||||
</junit.jupiter.execution.parallel.enabled>
|
||||
<junit.jupiter.execution.parallel.mode.default>
|
||||
concurrent
|
||||
</junit.jupiter.execution.parallel.mode.default>
|
||||
<junit.jupiter.execution.parallel.config.dynamic.factor>
|
||||
0.5
|
||||
</junit.jupiter.execution.parallel.config.dynamic.factor>
|
||||
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
|
||||
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
|
||||
</systemPropertyVariables>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>${quarkus.platform.group-id}</groupId>
|
||||
<artifactId>quarkus-maven-plugin</artifactId>
|
||||
<version>${quarkus.platform.version}</version>
|
||||
<extensions>true</extensions>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>quarkus-plugin</id>
|
||||
<goals>
|
||||
<goal>build</goal>
|
||||
<goal>generate-code</goal>
|
||||
<goal>generate-code-tests</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</project>
|
||||
@@ -1,97 +0,0 @@
|
||||
####
|
||||
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
|
||||
#
|
||||
# Before building the container image run:
|
||||
#
|
||||
# ./mvnw package
|
||||
#
|
||||
# Then, build the image with:
|
||||
#
|
||||
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
|
||||
#
|
||||
# Then run the container using:
|
||||
#
|
||||
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
|
||||
#
|
||||
# If you want to include the debug port into your docker image
|
||||
# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
|
||||
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
|
||||
# when running the container
|
||||
#
|
||||
# Then run the container using :
|
||||
#
|
||||
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
|
||||
#
|
||||
# This image uses the `run-java.sh` script to run the application.
|
||||
# This scripts computes the command line to execute your Java application, and
|
||||
# includes memory/GC tuning.
|
||||
# You can configure the behavior using the following environment properties:
|
||||
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
|
||||
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
|
||||
# in JAVA_OPTS (example: "-Dsome.property=foo")
|
||||
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
|
||||
# used to calculate a default maximal heap memory based on a containers restriction.
|
||||
# If used in a container without any memory constraints for the container then this
|
||||
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
|
||||
# of the container available memory as set here. The default is `50` which means 50%
|
||||
# of the available memory is used as an upper boundary. You can skip this mechanism by
|
||||
# setting this value to `0` in which case no `-Xmx` option is added.
|
||||
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
|
||||
# is used to calculate a default initial heap memory based on the maximum heap memory.
|
||||
# If used in a container without any memory constraints for the container then this
|
||||
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
|
||||
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
|
||||
# is used as the initial heap size. You can skip this mechanism by setting this value
|
||||
# to `0` in which case no `-Xms` option is added (example: "25")
|
||||
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
|
||||
# This is used to calculate the maximum value of the initial heap memory. If used in
|
||||
# a container without any memory constraints for the container then this option has
|
||||
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
|
||||
# here. The default is 4096MB which means the calculated value of `-Xms` never will
|
||||
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
|
||||
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
|
||||
# when things are happening. This option, if set to true, will set
|
||||
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
|
||||
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
|
||||
# true").
|
||||
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
|
||||
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
|
||||
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
|
||||
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
|
||||
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
|
||||
# (example: "20")
|
||||
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
|
||||
# (example: "40")
|
||||
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
|
||||
# (example: "4")
|
||||
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
|
||||
# previous GC times. (example: "90")
|
||||
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
|
||||
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
|
||||
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
|
||||
# contain the necessary JRE command-line options to specify the required GC, which
|
||||
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
|
||||
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
|
||||
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
|
||||
# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
|
||||
# accessed directly. (example: "foo.example.com,bar.example.com")
|
||||
#
|
||||
###
|
||||
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
|
||||
|
||||
ENV LANGUAGE='en_US:en'
|
||||
|
||||
|
||||
# We make four distinct layers so if there are application changes the library layers can be re-used
|
||||
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
|
||||
COPY --chown=185 target/quarkus-app/*.jar /deployments/
|
||||
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
|
||||
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
|
||||
|
||||
EXPOSE 8080
|
||||
USER 185
|
||||
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
|
||||
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
|
||||
|
||||
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
|
||||
|
||||
@@ -1,93 +0,0 @@
|
||||
####
|
||||
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
|
||||
#
|
||||
# Before building the container image run:
|
||||
#
|
||||
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
|
||||
#
|
||||
# Then, build the image with:
|
||||
#
|
||||
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
|
||||
#
|
||||
# Then run the container using:
|
||||
#
|
||||
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
|
||||
#
|
||||
# If you want to include the debug port into your docker image
|
||||
# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
|
||||
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
|
||||
# when running the container
|
||||
#
|
||||
# Then run the container using :
|
||||
#
|
||||
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
|
||||
#
|
||||
# This image uses the `run-java.sh` script to run the application.
|
||||
# This scripts computes the command line to execute your Java application, and
|
||||
# includes memory/GC tuning.
|
||||
# You can configure the behavior using the following environment properties:
|
||||
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
|
||||
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
|
||||
# in JAVA_OPTS (example: "-Dsome.property=foo")
|
||||
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
|
||||
# used to calculate a default maximal heap memory based on a containers restriction.
|
||||
# If used in a container without any memory constraints for the container then this
|
||||
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
|
||||
# of the container available memory as set here. The default is `50` which means 50%
|
||||
# of the available memory is used as an upper boundary. You can skip this mechanism by
|
||||
# setting this value to `0` in which case no `-Xmx` option is added.
|
||||
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
|
||||
# is used to calculate a default initial heap memory based on the maximum heap memory.
|
||||
# If used in a container without any memory constraints for the container then this
|
||||
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
|
||||
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
|
||||
# is used as the initial heap size. You can skip this mechanism by setting this value
|
||||
# to `0` in which case no `-Xms` option is added (example: "25")
|
||||
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
|
||||
# This is used to calculate the maximum value of the initial heap memory. If used in
|
||||
# a container without any memory constraints for the container then this option has
|
||||
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
|
||||
# here. The default is 4096MB which means the calculated value of `-Xms` never will
|
||||
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
|
||||
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
|
||||
# when things are happening. This option, if set to true, will set
|
||||
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
|
||||
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
|
||||
# true").
|
||||
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
|
||||
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
|
||||
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
|
||||
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
|
||||
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
|
||||
# (example: "20")
|
||||
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
|
||||
# (example: "40")
|
||||
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
|
||||
# (example: "4")
|
||||
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
|
||||
# previous GC times. (example: "90")
|
||||
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
|
||||
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
|
||||
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
|
||||
# contain the necessary JRE command-line options to specify the required GC, which
|
||||
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
|
||||
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
|
||||
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
|
||||
# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
|
||||
# accessed directly. (example: "foo.example.com,bar.example.com")
|
||||
#
|
||||
###
|
||||
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
|
||||
|
||||
ENV LANGUAGE='en_US:en'
|
||||
|
||||
|
||||
COPY target/lib/* /deployments/lib/
|
||||
COPY target/*-runner.jar /deployments/quarkus-run.jar
|
||||
|
||||
EXPOSE 8080
|
||||
USER 185
|
||||
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
|
||||
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
|
||||
|
||||
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
|
||||
@@ -1,27 +0,0 @@
|
||||
####
|
||||
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
|
||||
#
|
||||
# Before building the container image run:
|
||||
#
|
||||
# ./mvnw package -Dnative
|
||||
#
|
||||
# Then, build the image with:
|
||||
#
|
||||
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
|
||||
#
|
||||
# Then run the container using:
|
||||
#
|
||||
# docker run -i --rm -p 8080:8080 quarkus/server
|
||||
#
|
||||
###
|
||||
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
|
||||
WORKDIR /work/
|
||||
RUN chown 1001 /work \
|
||||
&& chmod "g+rwX" /work \
|
||||
&& chown 1001:root /work
|
||||
COPY --chown=1001:root target/*-runner /work/application
|
||||
|
||||
EXPOSE 8080
|
||||
USER 1001
|
||||
|
||||
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
|
||||
@@ -1,30 +0,0 @@
|
||||
####
|
||||
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
|
||||
# It uses a micro base image, tuned for Quarkus native executables.
|
||||
# It reduces the size of the resulting container image.
|
||||
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
|
||||
#
|
||||
# Before building the container image run:
|
||||
#
|
||||
# ./mvnw package -Dnative
|
||||
#
|
||||
# Then, build the image with:
|
||||
#
|
||||
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
|
||||
#
|
||||
# Then run the container using:
|
||||
#
|
||||
# docker run -i --rm -p 8080:8080 quarkus/server
|
||||
#
|
||||
###
|
||||
FROM quay.io/quarkus/quarkus-micro-image:2.0
|
||||
WORKDIR /work/
|
||||
RUN chown 1001 /work \
|
||||
&& chmod "g+rwX" /work \
|
||||
&& chown 1001:root /work
|
||||
COPY --chown=1001:root target/*-runner /work/application
|
||||
|
||||
EXPOSE 8080
|
||||
USER 1001
|
||||
|
||||
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
|
||||
@@ -1,34 +0,0 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.objects.periodic-push-op-interval=5m
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
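The deleted default application.properties above still documents the knobs DHFS exposes; elsewhere in this diff (the docker-compose file and the IDE run configurations) the same settings are passed as -D system properties at launch. A hedged sketch of overriding a few of them when starting the packaged app follows; the jar path assumes the quarkus-app layout used in this diff, and the values are illustrative only.

```bash
# Override selected DHFS defaults at startup; property names come from the
# deleted application.properties above, values here are examples, not defaults.
java \
  -Ddhfs.fuse.root="$HOME/dhfs_default/fuse" \
  -Ddhfs.objects.peerdiscovery.broadcast=true \
  -Ddhfs.files.target_chunk_size=2097152 \
  -jar quarkus-app/quarkus-run.jar
```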
@@ -1,44 +0,0 @@
|
||||
package com.usatiuk.dhfs;
|
||||
|
||||
import io.quarkus.logging.Log;
|
||||
import io.quarkus.runtime.ShutdownEvent;
|
||||
import io.quarkus.runtime.StartupEvent;
|
||||
import jakarta.annotation.Priority;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.enterprise.event.Observes;
|
||||
import org.eclipse.microprofile.config.inject.ConfigProperty;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Objects;
|
||||
|
||||
@ApplicationScoped
|
||||
public class TestDataCleaner {
|
||||
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
|
||||
String tempDirectory;
|
||||
|
||||
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
|
||||
try {
|
||||
purgeDirectory(Path.of(tempDirectory).toFile());
|
||||
} catch (Exception ignored) {
|
||||
Log.warn("Couldn't cleanup test data on init");
|
||||
}
|
||||
}
|
||||
|
||||
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
|
||||
purgeDirectory(Path.of(tempDirectory).toFile());
|
||||
}
|
||||
|
||||
public static void purgeDirectory(File dir) {
|
||||
try {
|
||||
for (File file : Objects.requireNonNull(dir.listFiles())) {
|
||||
if (file.isDirectory())
|
||||
purgeDirectory(file);
|
||||
file.delete();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
Log.error("Couldn't purge directory " + dir, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
|
||||
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
|
||||
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
|
||||
dhfs.objects.ref_verification=true
|
||||
dhfs.objects.deletion.delay=0
|
||||
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
|
||||
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
|
||||
quarkus.http.test-port=0
|
||||
quarkus.http.test-ssl-port=0
|
||||
dhfs.local-discovery=false
|
||||
dhfs.objects.persistence.snapshot-extra-checks=true
|
||||
@@ -72,26 +72,6 @@
|
||||
<artifactId>quarkus-junit5</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.SerCeMan</groupId>
|
||||
<artifactId>jnr-fuse</artifactId>
|
||||
<version>44ed40f8ce</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-ffi</artifactId>
|
||||
<version>2.2.16</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-posix</artifactId>
|
||||
<version>3.1.19</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-constants</artifactId>
|
||||
<version>0.10.4</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-lang3</artifactId>
|
||||
@@ -122,26 +102,11 @@
|
||||
<artifactId>commons-math3</artifactId>
|
||||
<version>3.6.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk</groupId>
|
||||
<artifactId>kleppmanntree</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>objects</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>sync-base</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>utils</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
package com.usatiuk.dhfs.files.objects;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.usatiuk.dhfs.JDataRemote;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.dhfs.repository.JDataRemoteDto;
|
||||
|
||||
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
|
||||
@Override
|
||||
public int estimateSize() {
|
||||
return data.size();
|
||||
}
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package com.usatiuk.dhfs.files.objects;
|
||||
|
||||
import com.usatiuk.dhfs.ProtoSerializer;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.dhfs.persistence.ChunkDataP;
|
||||
import com.usatiuk.dhfs.persistence.JObjectKeyP;
|
||||
import jakarta.inject.Singleton;
|
||||
|
||||
@Singleton
|
||||
public class ChunkDataProtoSerializer implements ProtoSerializer<ChunkDataP, ChunkData> {
|
||||
@Override
|
||||
public ChunkData deserialize(ChunkDataP message) {
|
||||
return new ChunkData(
|
||||
JObjectKey.of(message.getKey().getName()),
|
||||
message.getData()
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ChunkDataP serialize(ChunkData object) {
|
||||
return ChunkDataP.newBuilder()
|
||||
.setKey(JObjectKeyP.newBuilder().setName(object.key().value()).build())
|
||||
.setData(object.data())
|
||||
.build();
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
package com.usatiuk.dhfs.files.objects;
|
||||
|
||||
import com.usatiuk.dhfs.JDataRemote;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.dhfs.repository.JDataRemoteDto;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
|
||||
@Override
|
||||
public Class<? extends JDataRemote> objClass() {
|
||||
return File.class;
|
||||
}
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
package com.usatiuk.dhfs.files.objects;
|
||||
|
||||
import com.usatiuk.dhfs.ProtoSerializer;
|
||||
import com.usatiuk.dhfs.persistence.FileDtoP;
|
||||
import com.usatiuk.dhfs.utils.SerializationHelper;
|
||||
import jakarta.inject.Singleton;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
@Singleton
|
||||
public class FileProtoSerializer implements ProtoSerializer<FileDtoP, FileDto> {
|
||||
@Override
|
||||
public FileDto deserialize(FileDtoP message) {
|
||||
try (var is = message.getSerializedData().newInput()) {
|
||||
return SerializationHelper.deserialize(is);
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileDtoP serialize(FileDto object) {
|
||||
return FileDtoP.newBuilder().setSerializedData(SerializationHelper.serialize(object)).build();
|
||||
}
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
package com.usatiuk.dhfs.files.service;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.google.protobuf.UnsafeByteOperations;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.Optional;
|
||||
|
||||
public interface DhfsFileService {
|
||||
Optional<JObjectKey> open(String name);
|
||||
|
||||
Optional<JObjectKey> create(String name, long mode);
|
||||
|
||||
Pair<String, JObjectKey> inoToParent(JObjectKey ino);
|
||||
|
||||
void mkdir(String name, long mode);
|
||||
|
||||
Optional<GetattrRes> getattr(JObjectKey name);
|
||||
|
||||
Boolean chmod(JObjectKey name, long mode);
|
||||
|
||||
void unlink(String name);
|
||||
|
||||
Boolean rename(String from, String to);
|
||||
|
||||
Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs);
|
||||
|
||||
Iterable<String> readDir(String name);
|
||||
|
||||
long size(JObjectKey fileUuid);
|
||||
|
||||
Optional<ByteString> read(JObjectKey fileUuid, long offset, int length);
|
||||
|
||||
Long write(JObjectKey fileUuid, long offset, ByteString data);
|
||||
|
||||
default Long write(JObjectKey fileUuid, long offset, byte[] data) {
|
||||
return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
|
||||
}
|
||||
|
||||
Boolean truncate(JObjectKey fileUuid, long length);
|
||||
|
||||
String readlink(JObjectKey uuid);
|
||||
|
||||
ByteString readlinkBS(JObjectKey uuid);
|
||||
|
||||
JObjectKey symlink(String oldpath, String newpath);
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
package com.usatiuk.dhfs.files.service;
|
||||
|
||||
public class DirectoryNotEmptyException extends RuntimeException {
|
||||
@Override
|
||||
public synchronized Throwable fillInStackTrace() {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
package com.usatiuk.dhfs.files.service;
|
||||
|
||||
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
package com.usatiuk.dhfsfs.objects;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.usatiuk.dhfs.remoteobj.JDataRemote;
|
||||
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
|
||||
/**
|
||||
* ChunkData is a data structure that represents an immutable binary blob
|
||||
* @param key unique key
|
||||
* @param data binary data
|
||||
*/
|
||||
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
|
||||
@Override
|
||||
public int estimateSize() {
|
||||
return data.size();
|
||||
}
|
||||
}
|
||||
@@ -1,14 +1,22 @@
|
||||
package com.usatiuk.dhfs.files.objects;
|
||||
package com.usatiuk.dhfsfs.objects;
|
||||
|
||||
import com.usatiuk.dhfs.JDataRemote;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.dhfs.jmap.JMapHolder;
|
||||
import com.usatiuk.dhfs.jmap.JMapLongKey;
|
||||
import com.usatiuk.dhfs.repository.JDataRemoteDto;
|
||||
import com.usatiuk.dhfs.remoteobj.JDataRemote;
|
||||
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* File is a data structure that represents a file in the file system
|
||||
* @param key unique key
|
||||
* @param mode file mode
|
||||
* @param cTime creation time
|
||||
* @param mTime modification time
|
||||
* @param symlink true if the file is a symlink, false otherwise
|
||||
*/
|
||||
public record File(JObjectKey key, long mode, long cTime, long mTime,
|
||||
boolean symlink
|
||||
) implements JDataRemote, JMapHolder<JMapLongKey> {
|
||||
@@ -28,6 +36,10 @@ public record File(JObjectKey key, long mode, long cTime, long mTime,
|
||||
return new File(key, mode, cTime, mTime, symlink);
|
||||
}
|
||||
|
||||
public File withCurrentMTime() {
|
||||
return new File(key, mode, cTime, System.currentTimeMillis(), symlink);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<JObjectKey> collectRefsTo() {
|
||||
return Set.of();
|
||||
@@ -0,0 +1,20 @@
|
||||
package com.usatiuk.dhfsfs.objects;
|
||||
|
||||
import com.usatiuk.dhfs.remoteobj.JDataRemote;
|
||||
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* FileDto is a data transfer object that contains a file and its chunks.
|
||||
* @param file the file
|
||||
* @param chunks the list of chunks, each represented as a pair of a long and a JObjectKey
|
||||
*/
|
||||
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
|
||||
@Override
|
||||
public Class<? extends JDataRemote> objClass() {
|
||||
return File.class;
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,13 @@
|
||||
package com.usatiuk.dhfs.files.objects;
|
||||
package com.usatiuk.dhfsfs.objects;
|
||||
|
||||
import com.usatiuk.dhfs.jmap.JMapHelper;
|
||||
import com.usatiuk.dhfs.repository.syncmap.DtoMapper;
|
||||
import com.usatiuk.dhfs.syncmap.DtoMapper;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.inject.Inject;
|
||||
|
||||
/**
|
||||
* Maps a {@link File} object to a {@link FileDto} object and vice versa.
|
||||
*/
|
||||
@ApplicationScoped
|
||||
public class FileDtoMapper implements DtoMapper<File, FileDto> {
|
||||
@Inject
|
||||
@@ -1,8 +1,8 @@
|
||||
package com.usatiuk.dhfs.files.objects;
|
||||
package com.usatiuk.dhfsfs.objects;
|
||||
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.dhfs.jmap.JMapHelper;
|
||||
import com.usatiuk.dhfs.jmap.JMapLongKey;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.inject.Inject;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
@@ -10,11 +10,20 @@ import org.apache.commons.lang3.tuple.Pair;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Helper class for working with files.
|
||||
*/
|
||||
@ApplicationScoped
|
||||
public class FileHelper {
|
||||
@Inject
|
||||
JMapHelper jMapHelper;
|
||||
|
||||
/**
|
||||
* Get the chunks of a file.
|
||||
* Transaction is expected to be already started.
|
||||
* @param file the file to get chunks from
|
||||
* @return a list of pairs of chunk offset and chunk key
|
||||
*/
|
||||
public List<Pair<Long, JObjectKey>> getChunks(File file) {
|
||||
ArrayList<Pair<Long, JObjectKey>> chunks = new ArrayList<>();
|
||||
try (var it = jMapHelper.getIterator(file)) {
|
||||
@@ -26,6 +35,13 @@ public class FileHelper {
|
||||
return List.copyOf(chunks);
|
||||
}
|
||||
|
||||
/**
|
||||
* Replace the chunks of a file.
|
||||
* All previous chunks will be deleted.
|
||||
* Transaction is expected to be already started.
|
||||
* @param file the file to replace chunks in
|
||||
* @param chunks the list of pairs of chunk offset and chunk key
|
||||
*/
|
||||
public void replaceChunks(File file, List<Pair<Long, JObjectKey>> chunks) {
|
||||
jMapHelper.deleteAll(file);
|
||||
|
||||
@@ -1,20 +1,14 @@
|
||||
package com.usatiuk.dhfs.files.objects;
|
||||
package com.usatiuk.dhfsfs.objects;
|
||||
|
||||
import com.usatiuk.dhfs.PeerId;
|
||||
import com.usatiuk.dhfs.RemoteObjectDataWrapper;
|
||||
import com.usatiuk.dhfs.RemoteObjectMeta;
|
||||
import com.usatiuk.dhfs.RemoteTransaction;
|
||||
import com.usatiuk.dhfs.files.service.DhfsFileService;
|
||||
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
|
||||
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
|
||||
import com.usatiuk.dhfs.jmap.JMapHelper;
|
||||
import com.usatiuk.dhfs.repository.ObjSyncHandler;
|
||||
import com.usatiuk.dhfs.repository.PersistentPeerDataService;
|
||||
import com.usatiuk.dhfs.repository.SyncHelper;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.transaction.LockingStrategy;
|
||||
import com.usatiuk.objects.transaction.Transaction;
|
||||
import com.usatiuk.dhfs.peersync.PeerId;
|
||||
import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
|
||||
import com.usatiuk.dhfs.remoteobj.*;
|
||||
import com.usatiuk.dhfsfs.service.DhfsFileService;
|
||||
import com.usatiuk.kleppmanntree.AlreadyExistsException;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.transaction.Transaction;
|
||||
import io.grpc.Status;
|
||||
import io.grpc.StatusRuntimeException;
|
||||
import io.quarkus.logging.Log;
|
||||
@@ -29,6 +23,9 @@ import javax.annotation.Nullable;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Handles synchronization of file objects.
|
||||
*/
|
||||
@ApplicationScoped
|
||||
public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
|
||||
@Inject
|
||||
@@ -47,14 +44,18 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
|
||||
@Inject
|
||||
DhfsFileService fileService;
|
||||
|
||||
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
|
||||
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
|
||||
}
|
||||
|
||||
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
|
||||
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
|
||||
private JKleppmannTreeManager.JKleppmannTree getTree() {
|
||||
return jKleppmannTreeManager.getTree(JObjectKey.of("fs")).orElseThrow();
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve conflict between two file versions, update the file in storage and create a conflict file.
|
||||
*
|
||||
* @param from the peer that sent the update
|
||||
* @param key the key of the file
|
||||
* @param receivedChangelog the changelog of the received file
|
||||
* @param receivedData the received file data
|
||||
*/
|
||||
private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
|
||||
@Nullable FileDto receivedData) {
|
||||
var oursCurMeta = curTx.get(RemoteObjectMeta.class, key).orElse(null);
|
||||
@@ -136,12 +137,12 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
|
||||
|
||||
do {
|
||||
try {
|
||||
getTreeW().move(parent.getRight(),
|
||||
getTree().move(parent.getRight(),
|
||||
new JKleppmannTreeNodeMetaFile(
|
||||
parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + otherHostname.toString() + "." + i,
|
||||
newFile.key()
|
||||
),
|
||||
getTreeW().getNewNodeId()
|
||||
getTree().getNewNodeId()
|
||||
);
|
||||
} catch (AlreadyExistsException aex) {
|
||||
i++;
|
||||
@@ -0,0 +1,22 @@
|
||||
package com.usatiuk.dhfsfs.objects;
|
||||
|
||||
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* JKleppmannTreeNodeMetaDirectory is a record that represents a directory in the JKleppmann tree.
|
||||
* @param name the name of the directory
|
||||
*/
|
||||
public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
|
||||
public JKleppmannTreeNodeMeta withName(String name) {
|
||||
return new JKleppmannTreeNodeMetaDirectory(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<JObjectKey> collectRefsTo() {
|
||||
return List.of();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,24 @@
|
||||
package com.usatiuk.dhfsfs.objects;
|
||||
|
||||
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* JKleppmannTreeNodeMetaFile is a record that represents a file in the JKleppmann tree.
|
||||
* @param name the name of the file
|
||||
* @param fileIno a reference to the `File` object
|
||||
*/
|
||||
public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
|
||||
@Override
|
||||
public JKleppmannTreeNodeMeta withName(String name) {
|
||||
return new JKleppmannTreeNodeMetaFile(name, fileIno);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<JObjectKey> collectRefsTo() {
|
||||
return List.of(fileIno);
|
||||
}
|
||||
}
|
||||
@@ -1,27 +1,27 @@
|
||||
package com.usatiuk.dhfs.files.service;
|
||||
package com.usatiuk.dhfsfs.service;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.google.protobuf.UnsafeByteOperations;
|
||||
import com.usatiuk.dhfs.JDataRemote;
|
||||
import com.usatiuk.dhfs.RemoteObjectMeta;
|
||||
import com.usatiuk.dhfs.RemoteTransaction;
|
||||
import com.usatiuk.dhfs.files.objects.ChunkData;
|
||||
import com.usatiuk.dhfs.files.objects.File;
|
||||
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
|
||||
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
|
||||
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
|
||||
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
|
||||
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
|
||||
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
|
||||
import com.usatiuk.dhfs.jmap.JMapEntry;
|
||||
import com.usatiuk.dhfs.jmap.JMapHelper;
|
||||
import com.usatiuk.dhfs.jmap.JMapLongKey;
|
||||
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
|
||||
import com.usatiuk.dhfs.remoteobj.JDataRemote;
|
||||
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
|
||||
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
|
||||
import com.usatiuk.dhfsfs.objects.ChunkData;
|
||||
import com.usatiuk.dhfsfs.objects.File;
|
||||
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
|
||||
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
|
||||
import com.usatiuk.objects.JData;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.iterators.IteratorStart;
|
||||
import com.usatiuk.objects.transaction.LockingStrategy;
|
||||
import com.usatiuk.objects.transaction.Transaction;
|
||||
import com.usatiuk.objects.transaction.TransactionManager;
|
||||
import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
|
||||
import io.grpc.Status;
|
||||
import io.grpc.StatusRuntimeException;
|
||||
import io.quarkus.logging.Log;
|
||||
@@ -39,80 +39,78 @@ import java.nio.file.Path;
|
||||
import java.util.*;
|
||||
import java.util.stream.StreamSupport;
|
||||
|
||||
/**
|
||||
* Actual filesystem implementation.
|
||||
*/
|
||||
@ApplicationScoped
|
||||
public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
public class DhfsFileService {
|
||||
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
|
||||
int targetChunkAlignment;
|
||||
@ConfigProperty(name = "dhfs.files.target_chunk_size")
|
||||
int targetChunkSize;
|
||||
@ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
|
||||
int maxChunkSize;
|
||||
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
|
||||
boolean useHashForChunks;
|
||||
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
|
||||
boolean allowRecursiveDelete;
|
||||
@ConfigProperty(name = "dhfs.objects.ref_verification")
|
||||
boolean refVerification;
|
||||
@ConfigProperty(name = "dhfs.objects.write_log")
|
||||
boolean writeLogging;
|
||||
|
||||
@Inject
|
||||
Transaction curTx;
|
||||
@Inject
|
||||
RemoteTransaction remoteTx;
|
||||
@Inject
|
||||
TransactionManager jObjectTxManager;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
|
||||
int targetChunkAlignment;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.target_chunk_size")
|
||||
int targetChunkSize;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
|
||||
boolean useHashForChunks;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
|
||||
boolean allowRecursiveDelete;
|
||||
|
||||
@ConfigProperty(name = "dhfs.objects.ref_verification")
|
||||
boolean refVerification;
|
||||
|
||||
@ConfigProperty(name = "dhfs.objects.write_log")
|
||||
boolean writeLogging;
|
||||
|
||||
@Inject
|
||||
JKleppmannTreeManager jKleppmannTreeManager;
|
||||
|
||||
@Inject
|
||||
JMapHelper jMapHelper;
|
||||
|
||||
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
|
||||
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
|
||||
}
|
||||
|
||||
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
|
||||
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
|
||||
private JKleppmannTreeManager.JKleppmannTree getTree() {
|
||||
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new chunk with the given data and a new unique ID.
|
||||
*
|
||||
* @param bytes the data to store in the chunk
|
||||
* @return the created chunk
|
||||
*/
|
||||
private ChunkData createChunk(ByteString bytes) {
|
||||
var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
|
||||
remoteTx.putData(newChunk);
|
||||
remoteTx.putDataNew(newChunk);
|
||||
return newChunk;
|
||||
}
|
||||
|
||||
void init(@Observes @Priority(500) StartupEvent event) {
|
||||
Log.info("Initializing file service");
|
||||
getTreeW();
|
||||
getTree();
|
||||
}
|
||||
|
||||
private JKleppmannTreeNode getDirEntryW(String name) {
|
||||
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
|
||||
private JKleppmannTreeNode getDirEntry(String name) {
|
||||
var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
|
||||
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
|
||||
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
|
||||
return ret;
|
||||
}
|
||||
|
||||
private JKleppmannTreeNode getDirEntryR(String name) {
|
||||
var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
|
||||
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
|
||||
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
|
||||
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
|
||||
return ret;
|
||||
}
|
||||
|
||||
private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
|
||||
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
|
||||
var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
|
||||
if (res == null) return Optional.empty();
|
||||
var ret = curTx.get(JKleppmannTreeNode.class, res);
|
||||
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Get the attributes of a file or directory.
|
||||
*
|
||||
* @param uuid the UUID of the file or directory
|
||||
* @return the attributes of the file or directory
|
||||
*/
|
||||
public Optional<GetattrRes> getattr(JObjectKey uuid) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
var ref = curTx.get(JData.class, uuid).orElse(null);
|
||||
@@ -125,7 +123,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
} else {
|
||||
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
|
||||
}
|
||||
} else if (ref instanceof JKleppmannTreeNode) {
|
||||
} else if (ref instanceof JKleppmannTreeNodeHolder) {
|
||||
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
|
||||
} else {
|
||||
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
|
||||
@@ -134,13 +132,18 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Try to resolve a path to a file or directory.
|
||||
*
|
||||
* @param name the path to resolve
|
||||
* @return the key of the file or directory, or an empty optional if it does not exist
|
||||
*/
|
||||
public Optional<JObjectKey> open(String name) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
try {
|
||||
var ret = getDirEntryR(name);
|
||||
var ret = getDirEntry(name);
|
||||
return switch (ret.meta()) {
|
||||
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno());
|
||||
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
|
||||
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
|
||||
default -> Optional.empty();
|
||||
};
|
||||
@@ -158,11 +161,17 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Create a new file with the given name and mode.
|
||||
*
|
||||
* @param name the name of the file
|
||||
* @param mode the mode of the file
|
||||
* @return the key of the created file
|
||||
*/
|
||||
public Optional<JObjectKey> create(String name, long mode) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
Path path = Path.of(name);
|
||||
var parent = getDirEntryW(path.getParent().toString());
|
||||
var parent = getDirEntry(path.getParent().toString());
|
||||
|
||||
ensureDir(parent);
|
||||
|
||||
@@ -174,7 +183,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
remoteTx.putData(f);
|
||||
|
||||
try {
|
||||
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
|
||||
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
|
||||
} catch (Exception e) {
|
||||
// fobj.getMeta().removeRef(newNodeId);
|
||||
throw e;
|
||||
@@ -183,71 +192,101 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
});
|
||||
}
|
||||
|
||||
//FIXME: Slow..
|
||||
@Override
|
||||
/**
|
||||
* Get the parent directory of a file or directory.
|
||||
*
|
||||
* @param ino the key of the file or directory
|
||||
* @return the parent directory
|
||||
*/
|
||||
public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
return getTreeW().findParent(w -> {
|
||||
// FIXME: Slow
|
||||
return getTree().findParent(w -> {
|
||||
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
|
||||
return f.getFileIno().equals(ino);
|
||||
return f.fileIno().equals(ino);
|
||||
return false;
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Create a new directory with the given name and mode.
|
||||
*
|
||||
* @param name the name of the directory
|
||||
* @param mode the mode of the directory
|
||||
*/
|
||||
public void mkdir(String name, long mode) {
|
||||
jObjectTxManager.executeTx(() -> {
|
||||
Path path = Path.of(name);
|
||||
var parent = getDirEntryW(path.getParent().toString());
|
||||
var parent = getDirEntry(path.getParent().toString());
|
||||
ensureDir(parent);
|
||||
|
||||
String dname = path.getFileName().toString();
|
||||
|
||||
Log.debug("Creating directory " + name);
|
||||
|
||||
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
|
||||
// TODO: No modes for directories yet
|
||||
getTree().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId());
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Unlink a file or directory.
|
||||
*
|
||||
* @param name the name of the file or directory
|
||||
* @throws DirectoryNotEmptyException if the directory is not empty and recursive delete is not allowed
|
||||
*/
|
||||
public void unlink(String name) {
|
||||
jObjectTxManager.executeTx(() -> {
|
||||
var node = getDirEntryOpt(name).orElse(null);
|
||||
if (node == null)
|
||||
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
|
||||
if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
|
||||
if (!allowRecursiveDelete && !node.children().isEmpty())
|
||||
throw new DirectoryNotEmptyException();
|
||||
}
|
||||
getTreeW().trash(node.meta(), node.key());
|
||||
getTree().trash(node.meta(), node.key());
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Rename a file or directory.
|
||||
*
|
||||
* @param from the old name
|
||||
* @param to the new name
|
||||
* @return true if the rename was successful, false otherwise
|
||||
*/
|
||||
public Boolean rename(String from, String to) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
var node = getDirEntryW(from);
|
||||
var node = getDirEntry(from);
|
||||
JKleppmannTreeNodeMeta meta = node.meta();
|
||||
|
||||
var toPath = Path.of(to);
|
||||
var toDentry = getDirEntryW(toPath.getParent().toString());
|
||||
var toDentry = getDirEntry(toPath.getParent().toString());
|
||||
ensureDir(toDentry);
|
||||
|
||||
getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
|
||||
getTree().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Change the mode of a file or directory.
|
||||
*
|
||||
* @param uuid the ID of the file or directory
|
||||
* @param mode the new mode
|
||||
* @return true if the mode was changed successfully, false otherwise
|
||||
*/
|
||||
public Boolean chmod(JObjectKey uuid, long mode) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
|
||||
|
||||
if (dent instanceof JKleppmannTreeNode) {
|
||||
if (dent instanceof JKleppmannTreeNodeHolder) {
|
||||
return true;
|
||||
} else if (dent instanceof RemoteObjectMeta) {
|
||||
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
|
||||
if (remote instanceof File f) {
|
||||
remoteTx.putData(f.withMode(mode).withMTime(System.currentTimeMillis()));
|
||||
remoteTx.putData(f.withMode(mode).withCurrentMTime());
|
||||
return true;
|
||||
} else {
|
||||
throw new IllegalArgumentException(uuid + " is not a file");
|
||||
@@ -258,10 +297,15 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Read the contents of a directory.
|
||||
*
|
||||
* @param name the path of the directory
|
||||
* @return an iterable of the names of the files in the directory
|
||||
*/
|
||||
public Iterable<String> readDir(String name) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
var found = getDirEntryW(name);
|
||||
var found = getDirEntry(name);
|
||||
|
||||
if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
|
||||
throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
|
||||
@@ -270,8 +314,15 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Optional<ByteString> read(JObjectKey fileUuid, long offset, int length) {
|
||||
/**
|
||||
* Read the contents of a file.
|
||||
*
|
||||
* @param fileUuid the ID of the file
|
||||
* @param offset the offset to start reading from
|
||||
* @param length the number of bytes to read
|
||||
* @return the contents of the file as a ByteString
|
||||
*/
|
||||
public ByteString read(JObjectKey fileUuid, long offset, int length) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
if (length < 0)
|
||||
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
|
||||
@@ -281,12 +332,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
|
||||
if (file == null) {
|
||||
Log.error("File not found when trying to read: " + fileUuid);
|
||||
return Optional.empty();
|
||||
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
|
||||
}
|
||||
|
||||
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
|
||||
if (!it.hasNext())
|
||||
return Optional.of(ByteString.empty());
|
||||
return ByteString.empty();
|
||||
|
||||
// if (it.peekNextKey().key() != offset) {
|
||||
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
|
||||
@@ -324,14 +375,20 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
chunk = it.next();
|
||||
}
|
||||
|
||||
return Optional.of(buf);
|
||||
return buf;
|
||||
} catch (Exception e) {
|
||||
Log.error("Error reading file: " + fileUuid, e);
|
||||
return Optional.empty();
|
||||
throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
|
||||
}
|
||||
});
|
||||
}

/**
* Read the contents of a chunk.
*
* @param uuid the ID of the chunk
* @return the chunk's data
*/
private ByteString readChunk(JObjectKey uuid) {
var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);

@@ -343,6 +400,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return chunkRead.data();
}

/**
* Get the size of a chunk.
*
* @param uuid the ID of the chunk
* @return the size of the chunk
*/
private int getChunkSize(JObjectKey uuid) {
return readChunk(uuid).size();
}
@@ -351,24 +414,25 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return num & -(1L << n);
}
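
The num & -(1L << n) expression above is alignDown: it clears the lowest n bits, rounding an offset down to a multiple of 2^n. A quick worked example using the target_chunk_alignment of 17 set later in application.properties (the offset itself is arbitrary):

long offset = 300_000;
long aligned = offset & -(1L << 17); // 1 << 17 == 131072, so aligned == 262144, the start of the enclosing 128 KiB chunk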
|
||||
@Override
|
||||
/**
|
||||
* Write data to a file.
|
||||
*
|
||||
* @param fileUuid the ID of the file
|
||||
* @param offset the offset to write to
|
||||
* @param data the data to write
|
||||
* @return the number of bytes written
|
||||
*/
|
||||
public Long write(JObjectKey fileUuid, long offset, ByteString data) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
if (offset < 0)
|
||||
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
|
||||
|
||||
var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
|
||||
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
|
||||
if (file == null) {
|
||||
Log.error("File not found when trying to write: " + fileUuid);
|
||||
return -1L;
|
||||
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
|
||||
}
|
||||
|
||||
if (writeLogging) {
|
||||
Log.info("Writing to file: " + file.key() + " size=" + size(fileUuid) + " "
|
||||
+ offset + " " + data.size());
|
||||
}
|
||||
|
||||
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
|
||||
Map<Long, JObjectKey> removedChunks = new HashMap<>();
|
||||
|
||||
long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
|
||||
long writeEnd = offset + data.size();
|
||||
@@ -406,7 +470,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
}
|
||||
|
||||
|
||||
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
|
||||
Map<Long, JObjectKey> newChunks = new HashMap<>();
|
||||
|
||||
if (existingEnd < offset) {
|
||||
if (!pendingPrefix.isEmpty()) {
|
||||
@@ -423,12 +487,13 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
int combinedSize = pendingWrites.size();
|
||||
|
||||
{
|
||||
int targetChunkSize = 1 << targetChunkAlignment;
|
||||
int cur = 0;
|
||||
while (cur < combinedSize) {
|
||||
int end;
|
||||
|
||||
if (targetChunkAlignment < 0)
|
||||
if (combinedSize - cur < maxChunkSize)
|
||||
end = combinedSize;
|
||||
else if (targetChunkAlignment < 0)
|
||||
end = combinedSize;
|
||||
else
|
||||
end = Math.min(cur + targetChunkSize, combinedSize);
|
||||
@@ -444,22 +509,28 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
}
|
||||
|
||||
for (var e : removedChunks.entrySet()) {
|
||||
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
|
||||
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
|
||||
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
|
||||
}
|
||||
|
||||
for (var e : newChunks.entrySet()) {
|
||||
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
|
||||
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
|
||||
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
|
||||
}
|
||||
|
||||
remoteTx.putData(file);
|
||||
remoteTx.putData(file.withCurrentMTime());
|
||||
|
||||
return (long) data.size();
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Truncate a file to the given length.
|
||||
*
|
||||
* @param fileUuid the ID of the file
|
||||
* @param length the new length of the file
|
||||
* @return true if the truncate was successful, false otherwise
|
||||
*/
|
||||
public Boolean truncate(JObjectKey fileUuid, long length) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
if (length < 0)
|
||||
@@ -535,21 +606,28 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());
|
||||
|
||||
for (var e : removedChunks.entrySet()) {
|
||||
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
|
||||
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
|
||||
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
|
||||
}
|
||||
|
||||
for (var e : newChunks.entrySet()) {
|
||||
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
|
||||
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
|
||||
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
|
||||
}
|
||||
|
||||
remoteTx.putData(file);
|
||||
remoteTx.putData(file.withCurrentMTime());
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
private void fillZeros(long fillStart, long length, NavigableMap<Long, JObjectKey> newChunks) {
|
||||
/**
|
||||
* Fill the given range with zeroes.
|
||||
*
|
||||
* @param fillStart the start of the range
|
||||
* @param length the end offset of the range (an absolute position, not a byte count)
|
||||
* @param newChunks the map to store the new chunks in
|
||||
*/
|
||||
private void fillZeros(long fillStart, long length, Map<Long, JObjectKey> newChunks) {
|
||||
long combinedSize = (length - fillStart);
|
||||
|
||||
long start = fillStart;
|
||||
@@ -584,26 +662,42 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Read the contents of a symlink.
|
||||
*
|
||||
* @param uuid the ID of the symlink
|
||||
* @return the contents of the symlink as a string
|
||||
*/
|
||||
public String readlink(JObjectKey uuid) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
return readlinkBS(uuid).toStringUtf8();
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Read the contents of a symlink as a ByteString.
|
||||
*
|
||||
* @param uuid the ID of the symlink
|
||||
* @return the contents of the symlink as a ByteString
|
||||
*/
|
||||
public ByteString readlinkBS(JObjectKey uuid) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
|
||||
return read(uuid, 0, Math.toIntExact(size(uuid))).get();
|
||||
return read(uuid, 0, Math.toIntExact(size(uuid)));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Create a symlink.
|
||||
*
|
||||
* @param oldpath the target of the symlink
|
||||
* @param newpath the path of the symlink
|
||||
* @return the key of the created symlink
|
||||
*/
|
||||
public JObjectKey symlink(String oldpath, String newpath) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
Path path = Path.of(newpath);
|
||||
var parent = getDirEntryW(path.getParent().toString());
|
||||
var parent = getDirEntry(path.getParent().toString());
|
||||
|
||||
ensureDir(parent);
|
||||
|
||||
@@ -617,18 +711,25 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());
|
||||
|
||||
remoteTx.putData(f);
|
||||
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
|
||||
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
|
||||
return f.key();
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Set the access and modification times of a file.
|
||||
*
|
||||
* @param fileUuid the ID of the file
|
||||
* @param atimeMs the access time in milliseconds
|
||||
* @param mtimeMs the modification time in milliseconds
|
||||
* @return true if the times were set successfully, false otherwise
|
||||
*/
|
||||
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
|
||||
|
||||
// FIXME:
|
||||
if (dent instanceof JKleppmannTreeNode) {
|
||||
if (dent instanceof JKleppmannTreeNodeHolder) {
|
||||
return true;
|
||||
} else if (dent instanceof RemoteObjectMeta) {
|
||||
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
|
||||
@@ -644,7 +745,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Get the size of a file.
|
||||
*
|
||||
* @param fileUuid the ID of the file
|
||||
* @return the size of the file
|
||||
*/
|
||||
public long size(JObjectKey fileUuid) {
|
||||
return jObjectTxManager.executeTx(() -> {
|
||||
long realSize = 0;
|
||||
@@ -663,4 +769,16 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
return realSize;
|
||||
});
|
||||
}

/**
* Write data to a file.
*
* @param fileUuid the ID of the file
* @param offset the offset to write to
* @param data the data to write
* @return the number of bytes written
*/
public Long write(JObjectKey fileUuid, long offset, byte[] data) {
return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
}
}
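
Taken together, the methods above (create, write, read, size) form the service's public surface; a minimal usage sketch in the style of the tests later in this diff, with the path and mode as arbitrary example values:

var key = fileService.create("/example.txt", 0644).orElseThrow();
fileService.write(key, 0, new byte[]{0, 1, 2, 3});
byte[] back = fileService.read(key, 0, 4).toByteArray(); // read() now returns ByteString directly, no Optional
long size = fileService.size(key); // 4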
@@ -0,0 +1,13 @@
package com.usatiuk.dhfsfs.service;

/**
* DirectoryNotEmptyException is thrown when a directory is not empty.
* This exception is used to indicate that a directory cannot be deleted
* because it contains files or subdirectories.
*/
public class DirectoryNotEmptyException extends RuntimeException {
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
}
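
Because fillInStackTrace() is overridden to return the exception itself, throwing it is cheap; callers are expected to catch and translate it. A hypothetical caller-side mapping in the FUSE layer (the ENOTEMPTY translation is an assumption, not shown in this diff):

try {
    fileService.unlink(path);
    return 0;
} catch (DirectoryNotEmptyException e) {
    return -ErrorCodes.ENOTEMPTY(); // assumed errno mapping for removing a non-empty directory
}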
@@ -0,0 +1,11 @@
package com.usatiuk.dhfsfs.service;

/**
* GetattrRes is a record that represents the result of a getattr operation.
* @param mtime File modification time
* @param ctime File creation time
* @param mode File mode
* @param type File type
*/
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.files.service;
|
||||
package com.usatiuk.dhfsfs.service;
|
||||
|
||||
public enum GetattrType {
|
||||
FILE,
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.files;
|
||||
package com.usatiuk.dhfsfs;
|
||||
|
||||
import io.quarkus.test.junit.QuarkusTest;
|
||||
import io.quarkus.test.junit.TestProfile;
|
||||
@@ -1,9 +1,8 @@
|
||||
package com.usatiuk.dhfs.files;
|
||||
package com.usatiuk.dhfsfs;
|
||||
|
||||
import com.usatiuk.dhfs.RemoteTransaction;
|
||||
import com.usatiuk.dhfs.TempDataProfile;
|
||||
import com.usatiuk.dhfs.files.objects.File;
|
||||
import com.usatiuk.dhfs.files.service.DhfsFileService;
|
||||
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
|
||||
import com.usatiuk.dhfsfs.objects.File;
|
||||
import com.usatiuk.dhfsfs.service.DhfsFileService;
|
||||
import com.usatiuk.kleppmanntree.AlreadyExistsException;
|
||||
import com.usatiuk.objects.transaction.Transaction;
|
||||
import com.usatiuk.objects.transaction.TransactionManager;
|
||||
@@ -90,7 +89,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
// for (int start = 0; start < all.length(); start++) {
|
||||
// for (int end = start; end <= all.length(); end++) {
|
||||
// var read = fileService.read(fuuid.toString(), start, end - start);
|
||||
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray());
|
||||
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.toByteArray());
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
@@ -111,17 +110,21 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
|
||||
var uuid = ret.get();
|
||||
|
||||
var curMtime = fileService.getattr(uuid).get().mtime();
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).toByteArray());
|
||||
fileService.write(uuid, 4, new byte[]{10, 11, 12});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
fileService.write(uuid, 10, new byte[]{13, 14});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
|
||||
fileService.write(uuid, 6, new byte[]{15, 16});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
|
||||
fileService.write(uuid, 3, new byte[]{17, 18});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
|
||||
|
||||
var newMtime = fileService.getattr(uuid).get().mtime();
|
||||
Assertions.assertTrue(newMtime > curMtime);
|
||||
|
||||
fileService.unlink("/writeTest");
|
||||
Assertions.assertFalse(fileService.open("/writeTest").isPresent());
|
||||
@@ -135,7 +138,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
var uuid = ret.get();
|
||||
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
|
||||
fileService.unlink("/removeTest");
|
||||
Assertions.assertFalse(fileService.open("/removeTest").isPresent());
|
||||
@@ -149,12 +152,12 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
var uuid = ret.get();
|
||||
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
|
||||
fileService.truncate(uuid, 20);
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
|
||||
fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
|
||||
}
|
||||
|
||||
@RepeatedTest(100)
|
||||
@@ -166,12 +169,12 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
var uuid = ret.get();
|
||||
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
|
||||
fileService.truncate(uuid, 20);
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
|
||||
fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).toByteArray());
|
||||
} finally {
|
||||
fileService.unlink("/truncateTest2");
|
||||
}
|
||||
@@ -185,10 +188,10 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
var uuid = ret.get();
|
||||
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
|
||||
fileService.truncate(uuid, 7);
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).toByteArray());
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -198,14 +201,14 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
var uuid = ret.get();
|
||||
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
|
||||
Assertions.assertTrue(fileService.rename("/moveTest", "/movedTest"));
|
||||
Assertions.assertFalse(fileService.open("/moveTest").isPresent());
|
||||
Assertions.assertTrue(fileService.open("/movedTest").isPresent());
|
||||
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
|
||||
fileService.read(fileService.open("/movedTest").get(), 0, 10).get().toByteArray());
|
||||
fileService.read(fileService.open("/movedTest").get(), 0, 10).toByteArray());
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -218,9 +221,9 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
var uuid2 = ret2.get();
|
||||
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29});
|
||||
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).toByteArray());
|
||||
|
||||
|
||||
jObjectTxManager.run(() -> {
|
||||
@@ -234,7 +237,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
Assertions.assertTrue(fileService.open("/moveOverTest2").isPresent());
|
||||
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
|
||||
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray());
|
||||
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).toByteArray());
|
||||
|
||||
// await().atMost(5, TimeUnit.SECONDS).until(() -> {
|
||||
// jObjectTxManager.run(() -> {
|
||||
@@ -252,8 +255,8 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
var uuid = ret.get();
|
||||
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).toByteArray());
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -263,13 +266,13 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
var uuid = ret.get();
|
||||
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
fileService.write(uuid, 20, new byte[]{10, 11, 12, 13, 14, 15, 16, 17, 18, 19});
|
||||
Assertions.assertArrayEquals(new byte[]{
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
10, 11, 12, 13, 14, 15, 16, 17, 18, 19
|
||||
}, fileService.read(uuid, 0, 30).get().toByteArray());
|
||||
}, fileService.read(uuid, 0, 30).toByteArray());
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -279,7 +282,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
var uuid = ret.get();
|
||||
|
||||
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
|
||||
|
||||
// var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new);
|
||||
// var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0);
|
||||
@@ -294,6 +297,6 @@ public abstract class DhfsFileServiceSimpleTestImpl {
|
||||
Assertions.assertTrue(fileService.open("/movedTest2").isPresent());
|
||||
|
||||
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
|
||||
fileService.read(fileService.open("/movedTest2").get(), 0, 10).get().toByteArray());
|
||||
fileService.read(fileService.open("/movedTest2").get(), 0, 10).toByteArray());
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.files;
|
||||
package com.usatiuk.dhfsfs;
|
||||
|
||||
import io.quarkus.test.junit.QuarkusTest;
|
||||
import io.quarkus.test.junit.TestProfile;
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.files;
|
||||
package com.usatiuk.dhfsfs;
|
||||
|
||||
import io.quarkus.test.junit.QuarkusTest;
|
||||
import io.quarkus.test.junit.TestProfile;
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs;
|
||||
package com.usatiuk.dhfsfs;
|
||||
|
||||
import io.quarkus.test.junit.QuarkusTestProfile;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs;
|
||||
package com.usatiuk.dhfsfs;
|
||||
|
||||
import io.quarkus.logging.Log;
|
||||
import io.quarkus.runtime.ShutdownEvent;
|
||||
@@ -30,7 +30,7 @@ public class TestDataCleaner {
|
||||
purgeDirectory(Path.of(tempDirectory).toFile());
|
||||
}
|
||||
|
||||
void purgeDirectory(File dir) {
|
||||
public void purgeDirectory(File dir) {
|
||||
for (File file : Objects.requireNonNull(dir.listFiles())) {
|
||||
if (file.isDirectory())
|
||||
purgeDirectory(file);
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.benchmarks;
|
||||
package com.usatiuk.dhfsfs.benchmarks;
|
||||
|
||||
import io.quarkus.logging.Log;
|
||||
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
|
||||
@@ -1,8 +1,8 @@
|
||||
package com.usatiuk.dhfs.benchmarks;
|
||||
package com.usatiuk.dhfsfs.benchmarks;
|
||||
|
||||
import com.google.protobuf.UnsafeByteOperations;
|
||||
import com.usatiuk.dhfs.TempDataProfile;
|
||||
import com.usatiuk.dhfs.files.service.DhfsFileService;
|
||||
import com.usatiuk.dhfsfs.TempDataProfile;
|
||||
import com.usatiuk.dhfsfs.service.DhfsFileService;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import io.quarkus.test.junit.QuarkusTest;
|
||||
import io.quarkus.test.junit.TestProfile;
|
||||
@@ -73,24 +73,9 @@
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.SerCeMan</groupId>
|
||||
<groupId>com.github.serceman</groupId>
|
||||
<artifactId>jnr-fuse</artifactId>
|
||||
<version>44ed40f8ce</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-ffi</artifactId>
|
||||
<version>2.2.16</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-posix</artifactId>
|
||||
<version>3.1.19</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-constants</artifactId>
|
||||
<version>0.10.4</version>
|
||||
<version>0.5.8</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
@@ -122,26 +107,11 @@
|
||||
<artifactId>commons-math3</artifactId>
|
||||
<version>3.6.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk</groupId>
|
||||
<artifactId>kleppmanntree</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>objects</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>dhfs-fs</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>sync-base</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>utils</artifactId>
|
||||
@@ -169,16 +139,13 @@
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-failsafe-plugin</artifactId>
|
||||
<configuration>
|
||||
<forkCount>1C</forkCount>
|
||||
<reuseForks>false</reuseForks>
|
||||
<parallel>classes</parallel>
|
||||
<systemPropertyVariables>
|
||||
<junit.jupiter.execution.parallel.enabled>
|
||||
true
|
||||
false
|
||||
</junit.jupiter.execution.parallel.enabled>
|
||||
<junit.jupiter.execution.parallel.mode.default>
|
||||
concurrent
|
||||
</junit.jupiter.execution.parallel.mode.default>
|
||||
<junit.jupiter.execution.parallel.config.dynamic.factor>
|
||||
0.5
|
||||
</junit.jupiter.execution.parallel.config.dynamic.factor>
|
||||
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
|
||||
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
|
||||
</systemPropertyVariables>
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
package com.usatiuk.dhfs.fuse;
|
||||
|
||||
import jakarta.inject.Singleton;
|
||||
import jdk.internal.access.JavaNioAccess;
|
||||
import jdk.internal.access.SharedSecrets;
|
||||
import sun.misc.Unsafe;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
|
||||
@Singleton
|
||||
class JnrPtrByteOutputAccessors {
|
||||
JavaNioAccess _nioAccess;
|
||||
Unsafe _unsafe;
|
||||
|
||||
JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException {
|
||||
_nioAccess = SharedSecrets.getJavaNioAccess();
|
||||
Field f = Unsafe.class.getDeclaredField("theUnsafe");
|
||||
f.setAccessible(true);
|
||||
_unsafe = (Unsafe) f.get(null);
|
||||
}
|
||||
|
||||
public JavaNioAccess getNioAccess() {
|
||||
return _nioAccess;
|
||||
}
|
||||
|
||||
public Unsafe getUnsafe() {
|
||||
return _unsafe;
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,15 @@
|
||||
package com.usatiuk.dhfs.fuse;
|
||||
package com.usatiuk.dhfsfuse;
|
||||
|
||||
import com.google.protobuf.UnsafeByteOperations;
|
||||
import com.kenai.jffi.MemoryIO;
|
||||
import com.sun.security.auth.module.UnixSystem;
|
||||
import com.usatiuk.dhfs.files.service.DhfsFileService;
|
||||
import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException;
|
||||
import com.usatiuk.dhfs.files.service.GetattrRes;
|
||||
import com.usatiuk.dhfsfs.service.DhfsFileService;
|
||||
import com.usatiuk.dhfsfs.service.DirectoryNotEmptyException;
|
||||
import com.usatiuk.dhfsfs.service.GetattrRes;
|
||||
import com.usatiuk.kleppmanntree.AlreadyExistsException;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.utils.UninitializedByteBuffer;
|
||||
import com.usatiuk.utils.UnsafeAccessor;
|
||||
import io.grpc.Status;
|
||||
import io.grpc.StatusRuntimeException;
|
||||
import io.quarkus.logging.Log;
|
||||
@@ -17,15 +20,15 @@ import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.enterprise.event.Observes;
|
||||
import jakarta.inject.Inject;
|
||||
import jnr.ffi.Pointer;
|
||||
import jnr.ffi.Runtime;
|
||||
import jnr.ffi.Struct;
|
||||
import jnr.ffi.types.off_t;
|
||||
import org.apache.commons.lang3.SystemUtils;
|
||||
import org.eclipse.microprofile.config.inject.ConfigProperty;
|
||||
import ru.serce.jnrfuse.ErrorCodes;
|
||||
import ru.serce.jnrfuse.FuseFillDir;
|
||||
import ru.serce.jnrfuse.FuseStubFS;
|
||||
import ru.serce.jnrfuse.struct.FileStat;
|
||||
import ru.serce.jnrfuse.struct.FuseFileInfo;
|
||||
import ru.serce.jnrfuse.struct.Statvfs;
|
||||
import ru.serce.jnrfuse.struct.Timespec;
|
||||
import ru.serce.jnrfuse.struct.*;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.file.Paths;
|
||||
@@ -36,10 +39,15 @@ import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
import static jnr.posix.FileStat.*;
|
||||
|
||||
/**
|
||||
* FUSE file system implementation.
|
||||
*/
|
||||
@ApplicationScoped
|
||||
public class DhfsFuse extends FuseStubFS {
|
||||
private static final int blksize = 1048576;
|
||||
private static final int iosize = 1048576;
|
||||
private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
|
||||
private final AtomicLong _fh = new AtomicLong(1);
|
||||
@ConfigProperty(name = "dhfs.fuse.root")
|
||||
String root;
|
||||
@ConfigProperty(name = "dhfs.fuse.enabled")
|
||||
@@ -49,13 +57,13 @@ public class DhfsFuse extends FuseStubFS {
|
||||
@ConfigProperty(name = "dhfs.files.target_chunk_size")
|
||||
int targetChunkSize;
|
||||
@Inject
|
||||
JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors;
|
||||
@Inject
|
||||
DhfsFileService fileService;
|
||||
|
||||
private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
|
||||
private final AtomicLong _fh = new AtomicLong(1);
|
||||
|
||||
/**
|
||||
* Allocate a handle for the given key.
|
||||
* @param key the key to allocate a handle for
|
||||
* @return the allocated handle, not 0
|
||||
*/
|
||||
private long allocateHandle(JObjectKey key) {
|
||||
while (true) {
|
||||
var newFh = _fh.getAndIncrement();
|
||||
@@ -66,47 +74,61 @@ public class DhfsFuse extends FuseStubFS {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the key from the handle.
|
||||
* @param handle the handle to get the key from
|
||||
* @return the key, or null if not found
|
||||
*/
|
||||
private JObjectKey getFromHandle(long handle) {
|
||||
assert handle != 0;
|
||||
if(handle == 0)
|
||||
throw new IllegalStateException("Handle is 0");
|
||||
return _openHandles.get(handle);
|
||||
}
|
||||
|
||||
void init(@Observes @Priority(100000) StartupEvent event) {
|
||||
if (!enabled) return;
|
||||
Paths.get(root).toFile().mkdirs();
|
||||
|
||||
if (!Paths.get(root).toFile().isDirectory())
|
||||
throw new IllegalStateException("Could not create directory " + root);
|
||||
|
||||
Log.info("Mounting with root " + root);
|
||||
|
||||
var uid = new UnixSystem().getUid();
|
||||
var gid = new UnixSystem().getGid();
|
||||
|
||||
var opts = new ArrayList<String>();
|
||||
|
||||
// Assuming macFuse
|
||||
if (SystemUtils.IS_OS_MAC) {
|
||||
if (SystemUtils.IS_OS_WINDOWS) {
|
||||
opts.add("-o");
|
||||
opts.add("iosize=" + iosize);
|
||||
} else if (SystemUtils.IS_OS_LINUX) {
|
||||
// FIXME: There's something else missing: the writes still seem to be 32k max
|
||||
opts.add("auto_cache");
|
||||
opts.add("-o");
|
||||
opts.add("uid=-1");
|
||||
opts.add("-o");
|
||||
opts.add("gid=-1");
|
||||
} else {
|
||||
Paths.get(root).toFile().mkdirs();
|
||||
|
||||
if (!Paths.get(root).toFile().isDirectory())
|
||||
throw new IllegalStateException("Could not create directory " + root);
|
||||
|
||||
var uid = new UnixSystem().getUid();
|
||||
var gid = new UnixSystem().getGid();
|
||||
|
||||
// Assuming macFuse
|
||||
if (SystemUtils.IS_OS_MAC) {
|
||||
opts.add("-o");
|
||||
opts.add("iosize=" + iosize);
|
||||
} else if (SystemUtils.IS_OS_LINUX) {
|
||||
// FIXME: There's something else missing: the writes still seem to be 32k max
|
||||
// opts.add("-o");
|
||||
// opts.add("large_read");
|
||||
opts.add("-o");
|
||||
opts.add("big_writes");
|
||||
opts.add("-o");
|
||||
opts.add("max_read=" + iosize);
|
||||
opts.add("-o");
|
||||
opts.add("max_write=" + iosize);
|
||||
}
|
||||
opts.add("-o");
|
||||
opts.add("big_writes");
|
||||
opts.add("auto_cache");
|
||||
opts.add("-o");
|
||||
opts.add("max_read=" + iosize);
|
||||
opts.add("uid=" + uid);
|
||||
opts.add("-o");
|
||||
opts.add("max_write=" + iosize);
|
||||
opts.add("gid=" + gid);
|
||||
}
|
||||
opts.add("-o");
|
||||
opts.add("auto_cache");
|
||||
opts.add("-o");
|
||||
opts.add("uid=" + uid);
|
||||
opts.add("-o");
|
||||
opts.add("gid=" + gid);
|
||||
|
||||
mount(Paths.get(root), false, debug, opts.toArray(String[]::new));
|
||||
}
|
||||
|
||||
@@ -224,8 +246,8 @@ public class DhfsFuse extends FuseStubFS {
|
||||
var fileKey = getFromHandle(fi.fh.get());
|
||||
var read = fileService.read(fileKey, offset, (int) size);
|
||||
if (read.isEmpty()) return 0;
|
||||
UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
|
||||
return read.get().size();
|
||||
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
|
||||
return read.size();
|
||||
} catch (Throwable e) {
|
||||
Log.error("When reading " + path, e);
|
||||
return -ErrorCodes.EIO();
|
||||
@@ -234,24 +256,22 @@ public class DhfsFuse extends FuseStubFS {
|
||||
|
||||
@Override
|
||||
public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
|
||||
var buffer = UninitializedByteBuffer.allocate((int) size);
|
||||
UnsafeAccessor.UNSAFE.copyMemory(
|
||||
buf.address(),
|
||||
UnsafeAccessor.NIO.getBufferAddress(buffer),
|
||||
size
|
||||
);
|
||||
return write(path, buffer, offset, fi);
|
||||
}
|
||||
|
||||
public int write(String path, ByteBuffer buffer, long offset, FuseFileInfo fi) {
|
||||
if (offset < 0) return -ErrorCodes.EINVAL();
|
||||
try {
|
||||
var fileKey = getFromHandle(fi.fh.get());
|
||||
var buffer = ByteBuffer.allocateDirect((int) size);
|
||||
|
||||
if (buffer.isDirect()) {
|
||||
jnrPtrByteOutputAccessors.getUnsafe().copyMemory(
|
||||
buf.address(),
|
||||
jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer),
|
||||
size
|
||||
);
|
||||
} else {
|
||||
buf.get(0, buffer.array(), 0, (int) size);
|
||||
}
|
||||
|
||||
var written = fileService.write(fileKey, offset, UnsafeByteOperations.unsafeWrap(buffer));
|
||||
return written.intValue();
|
||||
} catch (Throwable e) {
|
||||
} catch (Exception e) {
|
||||
Log.error("When writing " + path, e);
|
||||
return -ErrorCodes.EIO();
|
||||
}
|
||||
@@ -387,7 +407,7 @@ public class DhfsFuse extends FuseStubFS {
|
||||
var file = fileOpt.get();
|
||||
var read = fileService.readlinkBS(fileOpt.get());
|
||||
if (read.isEmpty()) return 0;
|
||||
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
|
||||
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
|
||||
buf.putByte(Math.min(size - 1, read.size()), (byte) 0);
|
||||
return 0;
|
||||
} catch (Throwable e) {
|
||||
@@ -419,4 +439,29 @@ public class DhfsFuse extends FuseStubFS {
|
||||
return -ErrorCodes.EIO();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int write_buf(String path, FuseBufvec buf, @off_t long off, FuseFileInfo fi) {
|
||||
int size = (int) libFuse.fuse_buf_size(buf);
|
||||
FuseBufvec tmpVec = new FuseBufvec(Runtime.getSystemRuntime());
|
||||
long tmpVecAddr = MemoryIO.getInstance().allocateMemory(Struct.size(tmpVec), false);
|
||||
try {
|
||||
tmpVec.useMemory(Pointer.wrap(Runtime.getSystemRuntime(), tmpVecAddr));
|
||||
FuseBufvec.init(tmpVec, size);
|
||||
var bb = UninitializedByteBuffer.allocate(size);
|
||||
var mem = UninitializedByteBuffer.getAddress(bb);
|
||||
tmpVec.buf.mem.set(mem);
|
||||
tmpVec.buf.size.set(size);
|
||||
int res = (int) libFuse.fuse_buf_copy(tmpVec, buf, 0);
|
||||
if (res != size) {
|
||||
Log.errorv("fuse_buf_copy failed: {0} != {1}", res, size);
|
||||
return -ErrorCodes.ENOMEM();
|
||||
}
|
||||
return write(path, bb, off, fi);
|
||||
} finally {
|
||||
if (tmpVecAddr != 0) {
|
||||
MemoryIO.getInstance().freeMemory(tmpVecAddr);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,22 +1,24 @@
|
||||
package com.usatiuk.dhfs.fuse;
|
||||
package com.usatiuk.dhfsfuse;
|
||||
|
||||
import com.google.protobuf.ByteOutput;
|
||||
import com.usatiuk.utils.UnsafeAccessor;
|
||||
import jnr.ffi.Pointer;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.MappedByteBuffer;
|
||||
|
||||
/**
|
||||
* JnrPtrByteOutput is a ByteOutput implementation that writes to a `jnr.ffi.Pointer`.
|
||||
*/
|
||||
public class JnrPtrByteOutput extends ByteOutput {
|
||||
private final Pointer _backing;
|
||||
private final long _size;
|
||||
private final JnrPtrByteOutputAccessors _accessors;
|
||||
private long _pos;
|
||||
|
||||
public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) {
|
||||
public JnrPtrByteOutput(Pointer backing, long size) {
|
||||
_backing = backing;
|
||||
_size = size;
|
||||
_pos = 0;
|
||||
_accessors = accessors;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -47,9 +49,9 @@ public class JnrPtrByteOutput extends ByteOutput {
|
||||
if (value instanceof MappedByteBuffer mb) {
|
||||
mb.load();
|
||||
}
|
||||
long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position();
|
||||
long addr = UnsafeAccessor.NIO.getBufferAddress(value) + value.position();
|
||||
var out = _backing.address() + _pos;
|
||||
_accessors.getUnsafe().copyMemory(addr, out, rem);
|
||||
UnsafeAccessor.UNSAFE.copyMemory(addr, out, rem);
|
||||
} else {
|
||||
_backing.put(_pos, value.array(), value.arrayOffset() + value.position(), rem);
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.app;
|
||||
package com.usatiuk.dhfsfuse;
|
||||
|
||||
import io.quarkus.runtime.Quarkus;
|
||||
import io.quarkus.runtime.QuarkusApplication;
|
||||
@@ -14,8 +14,9 @@ dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.files.target_chunk_size=524288
dhfs.files.max_chunk_size=524288
dhfs.files.target_chunk_alignment=17
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
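
For orientation, derived from the values above (not an additional change in the diff): the new target_chunk_alignment=17 aligns chunk boundaries to 1 << 17 = 131072 bytes (128 KiB), with max_chunk_size capping a single chunk at 524288 bytes (512 KiB); the old settings (alignment 19, target size 2097152) corresponded to 512 KiB alignment and 2 MiB chunks.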
|
||||
@@ -1,29 +0,0 @@
|
||||
package com.usatiuk.dhfs;
|
||||
|
||||
import io.quarkus.test.junit.QuarkusTestProfile;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
abstract public class TempDataProfile implements QuarkusTestProfile {
|
||||
protected void getConfigOverrides(Map<String, String> toPut) {
|
||||
}
|
||||
|
||||
@Override
|
||||
final public Map<String, String> getConfigOverrides() {
|
||||
Path tempDirWithPrefix;
|
||||
try {
|
||||
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
var ret = new HashMap<String, String>();
|
||||
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
|
||||
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
|
||||
getConfigOverrides(ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,5 @@
|
||||
package com.usatiuk.dhfs.fuse;
|
||||
package com.usatiuk.dhfsfuse;
|
||||
|
||||
import com.usatiuk.dhfs.TempDataProfile;
|
||||
import io.quarkus.test.junit.QuarkusTest;
|
||||
import io.quarkus.test.junit.TestProfile;
|
||||
import org.eclipse.microprofile.config.inject.ConfigProperty;
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs;
|
||||
package com.usatiuk.dhfsfuse;
|
||||
|
||||
import io.quarkus.test.junit.QuarkusTestProfile;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs;
|
||||
package com.usatiuk.dhfsfuse;
|
||||
|
||||
import io.quarkus.logging.Log;
|
||||
import io.quarkus.runtime.ShutdownEvent;
|
||||
@@ -30,7 +30,7 @@ public class TestDataCleaner {
|
||||
purgeDirectory(Path.of(tempDirectory).toFile());
|
||||
}
|
||||
|
||||
void purgeDirectory(File dir) {
|
||||
public static void purgeDirectory(File dir) {
|
||||
for (File file : Objects.requireNonNull(dir.listFiles())) {
|
||||
if (file.isDirectory())
|
||||
purgeDirectory(file);
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.integration;
|
||||
package com.usatiuk.dhfsfuse.integration;
|
||||
|
||||
import com.github.dockerjava.api.model.Device;
|
||||
import io.quarkus.logging.Log;
|
||||
@@ -32,9 +32,11 @@ public class DhfsFuseIT {
|
||||
String c1uuid;
|
||||
String c2uuid;
|
||||
|
||||
Network network;
|
||||
|
||||
@BeforeEach
|
||||
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
|
||||
Network network = Network.newNetwork();
|
||||
network = Network.newNetwork();
|
||||
container1 = new GenericContainer<>(DhfsImage.getInstance())
|
||||
.withPrivilegedMode(true)
|
||||
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
|
||||
@@ -65,22 +67,39 @@ public class DhfsFuseIT {
|
||||
var c1curl = container1.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
var c2curl = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
private void checkConsistency() {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> {
|
||||
Log.info("Listing consistency");
|
||||
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
|
||||
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
|
||||
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
|
||||
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
|
||||
Log.info(ls1);
|
||||
Log.info(cat1);
|
||||
Log.info(ls2);
|
||||
Log.info(cat2);
|
||||
|
||||
return ls1.equals(ls2) && cat1.equals(cat2);
|
||||
});
|
||||
}
|
||||
|
||||
@AfterEach
|
||||
void stop() {
|
||||
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
|
||||
network.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -149,35 +168,6 @@ public class DhfsFuseIT {
|
||||
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
}
|
||||
|
||||
// TODO: How this fits with the tree?
|
||||
@Test
|
||||
@Disabled
|
||||
void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.pauseContainerCmd(container2.getContainerId()).exec();
|
||||
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1);
|
||||
|
||||
client.unpauseContainerCmd(container2.getContainerId()).exec();
|
||||
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
|
||||
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1);
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
|
||||
}
|
||||
|
||||
@Test
|
||||
void deleteTest() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
@@ -202,6 +192,28 @@ public class DhfsFuseIT {
|
||||
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
|
||||
}
|
||||
|
||||
@Test
|
||||
void deleteTestKickedOut() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
|
||||
container2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("kicked"), 60, TimeUnit.SECONDS, 1);
|
||||
|
||||
Log.info("Deleting");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
|
||||
Log.info("Deleted");
|
||||
|
||||
// FIXME?
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
|
||||
}
|
||||
|
||||
@Test
|
||||
void moveFileTest() throws IOException, InterruptedException, TimeoutException {
|
||||
Log.info("Creating");
|
||||
@@ -245,8 +257,8 @@ public class DhfsFuseIT {
|
||||
var c2curl = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request DELETE " +
|
||||
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /dhfs_test/fuse/newfile1").getExitCode());
|
||||
@@ -261,8 +273,8 @@ public class DhfsFuseIT {
|
||||
container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
@@ -306,6 +318,33 @@ public class DhfsFuseIT {
|
||||
});
|
||||
}
|
||||
|
||||
@Test
|
||||
void dirConflictTest2() throws IOException, InterruptedException, TimeoutException {
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo fdsaio >> /dhfs_test/fuse/a/testf").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo exgrg >> /dhfs_test/fuse/a/testf").getExitCode());
|
||||
|
||||
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
|
||||
Log.warn("Waiting for connections");
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
|
||||
Log.warn("Connected");
|
||||
|
||||
checkConsistency();
|
||||
|
||||
var ls1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/a*/*");
|
||||
Assertions.assertTrue(ls1.getStdout().contains("fdsaio"));
|
||||
Assertions.assertTrue(ls1.getStdout().contains("exgrg"));
|
||||
}
|
||||
|
||||
@Test
|
||||
void dirCycleTest() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.integration;
|
||||
package com.usatiuk.dhfsfuse.integration;
|
||||
|
||||
import com.github.dockerjava.api.model.Device;
|
||||
import io.quarkus.logging.Log;
|
||||
@@ -35,13 +35,15 @@ public class DhfsFusex3IT {
|
||||
String c2uuid;
|
||||
String c3uuid;
|
||||
|
||||
Network network;
|
||||
|
||||
// This calculation is somewhat racy, so keep it hardcoded for now
|
||||
long emptyFileCount = 9;
|
||||
|
||||
@BeforeEach
|
||||
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
|
||||
// TODO: Dedup
|
||||
Network network = Network.newNetwork();
|
||||
network = Network.newNetwork();
|
||||
|
||||
container1 = new GenericContainer<>(DhfsImage.getInstance())
|
||||
.withPrivilegedMode(true)
|
||||
@@ -91,26 +93,26 @@ public class DhfsFusex3IT {
|
||||
var c1curl = container1.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
var c2curl1 = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
|
||||
var c2curl3 = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c3uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c3uuid);
|
||||
|
||||
var c3curl = container3.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
|
||||
@@ -131,6 +133,7 @@ public class DhfsFusex3IT {
|
||||
@AfterEach
|
||||
void stop() {
|
||||
Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::stop);
|
||||
network.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -190,8 +193,8 @@ public class DhfsFusex3IT {
|
||||
var c3curl = container3.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request DELETE " +
|
||||
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
Thread.sleep(10000);
|
||||
|
||||
@@ -252,21 +255,22 @@ public class DhfsFusex3IT {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));
|
||||
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.pauseContainerCmd(container1.getContainerId()).exec();
|
||||
client.pauseContainerCmd(container2.getContainerId()).exec();
|
||||
// Pauses needed as otherwise docker buffers some incoming packets
|
||||
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
client.disconnectFromNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
|
||||
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
|
||||
client.pauseContainerCmd(container3.getContainerId()).exec();
|
||||
client.unpauseContainerCmd(container2.getContainerId()).exec();
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
|
||||
client.pauseContainerCmd(container2.getContainerId()).exec();
|
||||
client.unpauseContainerCmd(container1.getContainerId()).exec();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());
|
||||
client.unpauseContainerCmd(container2.getContainerId()).exec();
|
||||
client.unpauseContainerCmd(container3.getContainerId()).exec();
|
||||
|
||||
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
client.connectToNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
|
||||
Log.warn("Waiting for connections");
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.integration;
|
||||
package com.usatiuk.dhfsfuse.integration;
|
||||
|
||||
import io.quarkus.logging.Log;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
@@ -66,21 +66,25 @@ public class DhfsImage implements Future<String> {
|
||||
.run("apt update && apt install -y libfuse2 curl gcc")
|
||||
.copy("/app", "/app")
|
||||
.copy("/libs", "/libs")
|
||||
.cmd("java", "-ea", "-Xmx128M",
|
||||
.cmd("java", "-ea", "-Xmx256M", "-XX:TieredStopAtLevel=1", "-XX:+UseParallelGC",
|
||||
"--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED",
|
||||
"--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED",
|
||||
"--add-opens=java.base/java.nio=ALL-UNNAMED",
|
||||
"--enable-preview",
|
||||
"-Ddhfs.objects.peerdiscovery.interval=1s",
|
||||
"-Ddhfs.objects.invalidation.delay=100",
|
||||
"-Ddhfs.objects.deletion.delay=0",
|
||||
"-Ddhfs.objects.deletion.can-delete-retry-delay=1000",
|
||||
"-Ddhfs.objects.ref_verification=true",
|
||||
"-Ddhfs.objects.write_log=true",
|
||||
"-Ddhfs.objects.sync.timeout=10",
|
||||
"-Ddhfs.objects.sync.timeout=30",
|
||||
"-Ddhfs.objects.sync.ping.timeout=5",
|
||||
"-Ddhfs.objects.reconnect_interval=1s",
|
||||
"-Ddhfs.objects.last-seen.timeout=30",
|
||||
"-Ddhfs.objects.last-seen.update=10",
|
||||
"-Ddhfs.sync.cert-check=false",
|
||||
"-Dquarkus.log.category.\"com.usatiuk\".level=TRACE",
|
||||
"-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE",
|
||||
"-Dquarkus.log.category.\"com.usatiuk.objects.transaction\".level=INFO",
|
||||
"-Ddhfs.objects.periodic-push-op-interval=5s",
|
||||
"-Ddhfs.fuse.root=/dhfs_test/fuse",
|
||||
"-Ddhfs.objects.persistence.files.root=/dhfs_test/data",
|
||||
@@ -1,7 +1,7 @@
|
||||
package com.usatiuk.dhfs.integration;
|
||||
package com.usatiuk.dhfsfuse.integration;
|
||||
|
||||
import com.github.dockerjava.api.model.Device;
|
||||
import com.usatiuk.dhfs.TestDataCleaner;
|
||||
import com.usatiuk.dhfsfuse.TestDataCleaner;
|
||||
import io.quarkus.logging.Log;
|
||||
import org.junit.jupiter.api.*;
|
||||
import org.slf4j.LoggerFactory;
|
||||
@@ -18,10 +18,7 @@ import java.nio.file.Files;
|
||||
import java.time.Duration;
|
||||
import java.util.Objects;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.CyclicBarrier;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.*;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static org.awaitility.Awaitility.await;
|
||||
@@ -39,12 +36,18 @@ public class KillIT {
|
||||
File data1;
|
||||
File data2;
|
||||
|
||||
Network network;
|
||||
|
||||
ExecutorService executor;
|
||||
|
||||
@BeforeEach
|
||||
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
|
||||
executor = Executors.newCachedThreadPool();
|
||||
|
||||
data1 = Files.createTempDirectory("").toFile();
|
||||
data2 = Files.createTempDirectory("").toFile();
|
||||
|
||||
Network network = Network.newNetwork();
|
||||
network = Network.newNetwork();
|
||||
|
||||
container1 = new GenericContainer<>(DhfsImage.getInstance())
|
||||
.withPrivilegedMode(true)
|
||||
@@ -78,14 +81,14 @@ public class KillIT {
|
||||
var c1curl = container1.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
var c2curl = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
@@ -96,11 +99,28 @@ public class KillIT {
|
||||
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
|
||||
TestDataCleaner.purgeDirectory(data1);
|
||||
TestDataCleaner.purgeDirectory(data2);
|
||||
executor.close();
|
||||
network.close();
|
||||
}
|
||||
|
||||
private void checkConsistency() {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> {
|
||||
Log.info("Listing consistency");
|
||||
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
|
||||
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
|
||||
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
|
||||
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
|
||||
Log.info(ls1);
|
||||
Log.info(cat1);
|
||||
Log.info(ls2);
|
||||
Log.info(cat2);
|
||||
|
||||
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
|
||||
});
|
||||
}
|
||||
|
||||
@Test
|
||||
void killTest(TestInfo testInfo) throws Exception {
|
||||
var executor = Executors.newFixedThreadPool(2);
|
||||
var barrier = new CyclicBarrier(2);
|
||||
var ret1 = executor.submit(() -> {
|
||||
try {
|
||||
@@ -124,24 +144,11 @@ public class KillIT {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> {
|
||||
Log.info("Listing consistency");
|
||||
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
|
||||
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
|
||||
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
|
||||
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
|
||||
Log.info(ls1);
|
||||
Log.info(cat1);
|
||||
Log.info(ls2);
|
||||
Log.info(cat2);
|
||||
|
||||
return ls1.equals(ls2) && cat1.equals(cat2);
|
||||
});
|
||||
checkConsistency();
|
||||
}
|
||||
|
||||
@Test
|
||||
void killTestDirs(TestInfo testInfo) throws Exception {
|
||||
var executor = Executors.newFixedThreadPool(2);
|
||||
var barrier = new CyclicBarrier(2);
|
||||
var ret1 = executor.submit(() -> {
|
||||
try {
|
||||
@@ -165,18 +172,64 @@ public class KillIT {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> {
|
||||
Log.info("Listing consistency");
|
||||
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
|
||||
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
|
||||
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
|
||||
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
|
||||
Log.info(ls1);
|
||||
Log.info(cat1);
|
||||
Log.info(ls2);
|
||||
Log.info(cat2);
|
||||
checkConsistency();
|
||||
}
|
||||
|
||||
return ls1.equals(ls2) && cat1.equals(cat2);
|
||||
@Test
|
||||
void killTest2(TestInfo testInfo) throws Exception {
|
||||
var barrier = new CyclicBarrier(2);
|
||||
var ret1 = executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.await();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier.await();
|
||||
Thread.sleep(10000);
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
|
||||
container2.start();
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
checkConsistency();
|
||||
}
|
||||
|
||||
@Test
|
||||
void killTestDirs2(TestInfo testInfo) throws Exception {
|
||||
var barrier = new CyclicBarrier(2);
|
||||
var ret1 = executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.await();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier.await();
|
||||
Thread.sleep(10000);
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
|
||||
container2.start();
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
checkConsistency();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,215 @@
|
||||
package com.usatiuk.dhfsfuse.integration;
|
||||
|
||||
import io.quarkus.logging.Log;
|
||||
|
||||
import java.io.*;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
public class LazyFs {
|
||||
private static final String lazyFsPath;
|
||||
|
||||
static {
|
||||
lazyFsPath = System.getProperty("lazyFsPath");
|
||||
System.out.println("LazyFs Path: " + lazyFsPath);
|
||||
}
|
||||
|
||||
private final String mountRoot;
|
||||
private final String dataRoot;
|
||||
private final String name;
|
||||
private final File configFile;
|
||||
private final File fifoFile;
|
||||
private Thread errPiper;
|
||||
private Thread outPiper;
|
||||
private CountDownLatch startLatch;
|
||||
private Process fs;
|
||||
public LazyFs(String name, String mountRoot, String dataRoot) {
|
||||
this.name = name;
|
||||
this.mountRoot = mountRoot;
|
||||
this.dataRoot = dataRoot;
|
||||
|
||||
try {
|
||||
configFile = File.createTempFile("lazyfs", ".conf");
|
||||
configFile.deleteOnExit();
|
||||
|
||||
fifoFile = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
|
||||
fifoFile.deleteOnExit();
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
|
||||
}
|
||||
|
||||
private String fifoPath() {
|
||||
return fifoFile.getAbsolutePath();
|
||||
}
|
||||
|
||||
public void start(String extraOpts) {
|
||||
var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
|
||||
if (!lfsPath.toFile().isFile())
|
||||
throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
|
||||
if (!lfsPath.toFile().canExecute())
|
||||
throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());
|
||||
|
||||
try (var rwFile = new RandomAccessFile(configFile, "rw");
|
||||
var channel = rwFile.getChannel()) {
|
||||
channel.truncate(0);
|
||||
var config = "[faults]\n" +
|
||||
"fifo_path=\"" + fifoPath() + "\"\n" +
|
||||
"[cache]\n" +
|
||||
"apply_eviction=false\n" +
|
||||
"[cache.simple]\n" +
|
||||
"custom_size=\"1gb\"\n" +
|
||||
"blocks_per_page=1\n" +
|
||||
"[filesystem]\n" +
|
||||
"log_all_operations=false\n" +
|
||||
"logfile=\"\"\n" + extraOpts;
|
||||
rwFile.write(config.getBytes());
|
||||
Log.info("LazyFs config: \n" + config);
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
var argList = new ArrayList<String>();
|
||||
|
||||
argList.add(lfsPath.toString());
|
||||
argList.add(Path.of(mountRoot).toString());
|
||||
argList.add("--config-path");
|
||||
argList.add(configFile.getAbsolutePath());
|
||||
argList.add("-o");
|
||||
argList.add("allow_other");
|
||||
argList.add("-o");
|
||||
argList.add("modules=subdir");
|
||||
argList.add("-o");
|
||||
argList.add("subdir=" + Path.of(dataRoot).toAbsolutePath().toString());
|
||||
try {
|
||||
Log.info("Starting LazyFs " + argList);
|
||||
fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
startLatch = new CountDownLatch(1);
|
||||
|
||||
outPiper = new Thread(() -> {
|
||||
try {
|
||||
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
|
||||
String line;
|
||||
|
||||
while ((line = input.readLine()) != null) {
|
||||
if (line.contains("running LazyFS"))
|
||||
startLatch.countDown();
|
||||
System.out.println(line);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
Log.info("Exception in LazyFs piper", e);
|
||||
}
|
||||
Log.info("LazyFs out piper finished");
|
||||
});
|
||||
outPiper.start();
|
||||
errPiper = new Thread(() -> {
|
||||
try {
|
||||
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
|
||||
String line;
|
||||
|
||||
while ((line = input.readLine()) != null) {
|
||||
System.out.println(line);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
Log.info("Exception in LazyFs piper", e);
|
||||
}
|
||||
Log.info("LazyFs err piper finished");
|
||||
});
|
||||
errPiper.start();
|
||||
|
||||
try {
|
||||
if (!startLatch.await(30, TimeUnit.SECONDS))
|
||||
throw new RuntimeException("StartLatch timed out");
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
Log.info("LazyFs started");
|
||||
}
|
||||
|
||||
public void start() {
|
||||
start("");
|
||||
}
|
||||
|
||||
private String mdbPath() {
|
||||
return Path.of(dataRoot).resolve("objects").resolve("data.mdb").toAbsolutePath().toString();
|
||||
}
|
||||
|
||||
public void startTornOp() {
|
||||
start("\n" +
|
||||
"[[injection]]\n" +
|
||||
"type=\"torn-seq\"\n" +
|
||||
"op=\"write\"\n" +
|
||||
"file=\"" + mdbPath() + "\"\n" +
|
||||
"persist=[1,4]\n" +
|
||||
"occurrence=3");
|
||||
}
|
||||
|
||||
public void startTornSeq() {
|
||||
start("[[injection]]\n" +
|
||||
"type=\"torn-op\"\n" +
|
||||
"file=\"" + mdbPath() + "\"\n" +
|
||||
"occurrence=3\n" +
|
||||
"parts=3 #or parts_bytes=[4096,3600,1260]\n" +
|
||||
"persist=[1,3]");
|
||||
}
|
||||
|
||||
public void crash() {
|
||||
try {
|
||||
var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
|
||||
Log.info("Running command: " + cmd);
|
||||
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd}).waitFor();
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public void stop() {
|
||||
try {
|
||||
synchronized (this) {
|
||||
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + mountRoot}).waitFor();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
// Doesn't actually work?
|
||||
//
|
||||
// public void crashop() {
|
||||
// try {
|
||||
// var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
|
||||
// System.out.println("Running command: " + cmd);
|
||||
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
|
||||
// Thread.sleep(1000);
|
||||
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
|
||||
// Thread.sleep(1000);
|
||||
// } catch (Exception e) {
|
||||
// throw new RuntimeException(e);
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// public void crashseq() {
|
||||
// try {
|
||||
// var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
|
||||
// System.out.println("Running command: " + cmd);
|
||||
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
|
||||
// Thread.sleep(1000);
|
||||
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
|
||||
// Thread.sleep(1000);
|
||||
// } catch (Exception e) {
|
||||
// throw new RuntimeException(e);
|
||||
// }
|
||||
// }
|
||||
}
|
||||
|
||||
@@ -0,0 +1,489 @@
|
||||
package com.usatiuk.dhfsfuse.integration;
|
||||
|
||||
import com.github.dockerjava.api.model.Device;
|
||||
import com.usatiuk.dhfsfuse.TestDataCleaner;
|
||||
import io.quarkus.logging.Log;
|
||||
import org.junit.jupiter.api.*;
|
||||
import org.junit.jupiter.params.ParameterizedTest;
|
||||
import org.junit.jupiter.params.provider.EnumSource;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.testcontainers.DockerClientFactory;
|
||||
import org.testcontainers.containers.GenericContainer;
|
||||
import org.testcontainers.containers.Network;
|
||||
import org.testcontainers.containers.output.Slf4jLogConsumer;
|
||||
import org.testcontainers.containers.output.WaitingConsumer;
|
||||
import org.testcontainers.containers.wait.strategy.Wait;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.time.Duration;
|
||||
import java.util.Objects;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.*;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static org.awaitility.Awaitility.await;
|
||||
|
||||
public class LazyFsIT {
|
||||
GenericContainer<?> container1;
|
||||
GenericContainer<?> container2;
|
||||
|
||||
WaitingConsumer waitingConsumer1;
|
||||
WaitingConsumer waitingConsumer2;
|
||||
|
||||
String c1uuid;
|
||||
String c2uuid;
|
||||
|
||||
File data1;
|
||||
File data2;
|
||||
File data1Lazy;
|
||||
File data2Lazy;
|
||||
|
||||
LazyFs lazyFs1;
|
||||
LazyFs lazyFs2;
|
||||
|
||||
ExecutorService executor;
|
||||
Network network;
|
||||
|
||||
@BeforeEach
|
||||
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
|
||||
executor = Executors.newCachedThreadPool();
|
||||
data1 = Files.createTempDirectory("dhfsdata").toFile();
|
||||
data2 = Files.createTempDirectory("dhfsdata").toFile();
|
||||
data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
|
||||
data2Lazy = Files.createTempDirectory("lazyfsroot").toFile();
|
||||
|
||||
network = Network.newNetwork();
|
||||
|
||||
lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
|
||||
lazyFs1.start();
|
||||
lazyFs2 = new LazyFs(testInfo.getDisplayName(), data2.toString(), data2Lazy.toString());
|
||||
lazyFs2.start();
|
||||
|
||||
container1 = new GenericContainer<>(DhfsImage.getInstance())
|
||||
.withPrivilegedMode(true)
|
||||
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
|
||||
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
|
||||
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
|
||||
container2 = new GenericContainer<>(DhfsImage.getInstance())
|
||||
.withPrivilegedMode(true)
|
||||
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
|
||||
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
|
||||
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
|
||||
|
||||
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
|
||||
|
||||
waitingConsumer1 = new WaitingConsumer();
|
||||
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
|
||||
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
|
||||
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
|
||||
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
|
||||
|
||||
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
|
||||
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
|
||||
|
||||
var c1curl = container1.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
var c2curl = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
@AfterEach
|
||||
void stop() {
|
||||
lazyFs1.stop();
|
||||
lazyFs2.stop();
|
||||
|
||||
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
|
||||
TestDataCleaner.purgeDirectory(data1);
|
||||
TestDataCleaner.purgeDirectory(data1Lazy);
|
||||
TestDataCleaner.purgeDirectory(data2);
|
||||
TestDataCleaner.purgeDirectory(data2Lazy);
|
||||
|
||||
executor.close();
|
||||
network.close();
|
||||
}
|
||||
|
||||
private void checkConsistency(String testName) {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> {
|
||||
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
|
||||
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
|
||||
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
|
||||
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
|
||||
Log.info("Listing consistency " + testName + "\n"
|
||||
+ ls1 + "\n"
|
||||
+ cat1 + "\n"
|
||||
+ ls2 + "\n"
|
||||
+ cat2 + "\n");
|
||||
|
||||
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
|
||||
});
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource(CrashType.class)
|
||||
void killTest(CrashType crashType, TestInfo testInfo) throws Exception {
|
||||
var barrier = new CountDownLatch(1);
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier.await();
|
||||
Thread.sleep(3000);
|
||||
Log.info("Killing");
|
||||
lazyFs1.crash();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.killContainerCmd(container1.getContainerId()).exec();
|
||||
container1.stop();
|
||||
lazyFs1.stop();
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
switch (crashType) {
|
||||
case CRASH -> lazyFs1.start();
|
||||
case TORN_OP -> lazyFs1.startTornOp();
|
||||
case TORN_SEQ -> lazyFs1.startTornSeq();
|
||||
}
|
||||
container1.start();
|
||||
|
||||
waitingConsumer1 = new WaitingConsumer();
|
||||
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
|
||||
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
Log.info("Failed to connect: " + testInfo.getDisplayName());
|
||||
// Sometimes it doesn't get mounted properly for some reason
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
Log.info("Killing");
|
||||
if (crashType.equals(CrashType.CRASH)) {
|
||||
Thread.sleep(3000);
|
||||
lazyFs1.crash();
|
||||
}
|
||||
try {
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
// Sometimes crash doesn't work
|
||||
Log.info("Failed to crash: " + testInfo.getDisplayName());
|
||||
if (crashType.equals(CrashType.CRASH))
|
||||
throw e;
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
client.killContainerCmd(container1.getContainerId()).exec();
|
||||
container1.stop();
|
||||
lazyFs1.stop();
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
lazyFs1.start();
|
||||
container1.start();
|
||||
|
||||
waitingConsumer1 = new WaitingConsumer();
|
||||
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
|
||||
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
checkConsistency(testInfo.getDisplayName());
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource(CrashType.class)
|
||||
void killTestDirs(CrashType crashType, TestInfo testInfo) throws Exception {
|
||||
var barrier = new CountDownLatch(1);
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier.await();
|
||||
Thread.sleep(3000);
|
||||
Log.info("Killing");
|
||||
lazyFs1.crash();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.killContainerCmd(container1.getContainerId()).exec();
|
||||
container1.stop();
|
||||
lazyFs1.stop();
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
switch (crashType) {
|
||||
case CRASH -> lazyFs1.start();
|
||||
case TORN_OP -> lazyFs1.startTornOp();
|
||||
case TORN_SEQ -> lazyFs1.startTornSeq();
|
||||
}
|
||||
container1.start();
|
||||
|
||||
waitingConsumer1 = new WaitingConsumer();
|
||||
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
|
||||
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
Log.info("Failed to connect: " + testInfo.getDisplayName());
|
||||
// Sometimes it doesn't get mounted properly for some reason
|
||||
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
Log.info("Killing");
|
||||
if (crashType.equals(CrashType.CRASH)) {
|
||||
Thread.sleep(3000);
|
||||
lazyFs1.crash();
|
||||
}
|
||||
try {
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
// Sometimes crash doesn't work
|
||||
Log.info("Failed to crash: " + testInfo.getDisplayName());
|
||||
if (crashType.equals(CrashType.CRASH))
|
||||
throw e;
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
client.killContainerCmd(container1.getContainerId()).exec();
|
||||
container1.stop();
|
||||
lazyFs1.stop();
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
lazyFs1.start();
|
||||
container1.start();
|
||||
|
||||
waitingConsumer1 = new WaitingConsumer();
|
||||
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
|
||||
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
checkConsistency(testInfo.getDisplayName());
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource(CrashType.class)
|
||||
void killTest2(CrashType crashType, TestInfo testInfo) throws Exception {
|
||||
var barrier = new CountDownLatch(1);
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier.await();
|
||||
Thread.sleep(3000);
|
||||
Log.info("Killing");
|
||||
lazyFs2.crash();
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
lazyFs2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
switch (crashType) {
|
||||
case CRASH -> lazyFs2.start();
|
||||
case TORN_OP -> lazyFs2.startTornOp();
|
||||
case TORN_SEQ -> lazyFs2.startTornSeq();
|
||||
}
|
||||
container2.start();
|
||||
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
Log.info("Failed to connect: " + testInfo.getDisplayName());
|
||||
// Sometimes it doesn't get mounted properly for some reason
|
||||
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
var barrier2 = new CountDownLatch(1);
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier2.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier2.await();
|
||||
Log.info("Killing");
|
||||
Thread.sleep(3000);
|
||||
if (crashType.equals(CrashType.CRASH)) {
|
||||
lazyFs2.crash();
|
||||
}
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
// Sometimes crash doesn't work
|
||||
Log.info("Failed to crash: " + testInfo.getDisplayName());
|
||||
if (crashType.equals(CrashType.CRASH))
|
||||
throw e;
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
lazyFs2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
lazyFs2.start();
|
||||
container2.start();
|
||||
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
checkConsistency(testInfo.getDisplayName());
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource(CrashType.class)
|
||||
void killTestDirs2(CrashType crashType, TestInfo testInfo) throws Exception {
|
||||
var barrier = new CountDownLatch(1);
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier.await();
|
||||
Thread.sleep(3000);
|
||||
Log.info("Killing");
|
||||
lazyFs2.crash();
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
lazyFs2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
switch (crashType) {
|
||||
case CRASH -> lazyFs2.start();
|
||||
case TORN_OP -> lazyFs2.startTornOp();
|
||||
case TORN_SEQ -> lazyFs2.startTornSeq();
|
||||
}
|
||||
container2.start();
|
||||
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
Log.info("Failed to connect: " + testInfo.getDisplayName());
|
||||
// Sometimes it doesn't get mounted properly for some reason
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
|
||||
var barrier2 = new CountDownLatch(1);
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier2.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier2.await();
|
||||
Thread.sleep(3000);
|
||||
Log.info("Killing");
|
||||
if (crashType.equals(CrashType.CRASH)) {
|
||||
lazyFs2.crash();
|
||||
}
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
// Sometimes crash doesn't work
|
||||
Log.info("Failed to crash: " + testInfo.getDisplayName());
|
||||
if (crashType.equals(CrashType.CRASH))
|
||||
throw e;
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
lazyFs2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
lazyFs2.start();
|
||||
container2.start();
|
||||
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
checkConsistency(testInfo.getDisplayName());
|
||||
}
|
||||
|
||||
|
||||
private static enum CrashType {
|
||||
CRASH,
|
||||
TORN_OP,
|
||||
TORN_SEQ
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package com.usatiuk.dhfs.integration;
|
||||
package com.usatiuk.dhfsfuse.integration;
|
||||
|
||||
import com.github.dockerjava.api.model.Device;
|
||||
import org.junit.jupiter.api.*;
|
||||
@@ -29,9 +29,11 @@ public class ResyncIT {
|
||||
String c1uuid;
|
||||
String c2uuid;
|
||||
|
||||
Network network;
|
||||
|
||||
@BeforeEach
|
||||
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
|
||||
Network network = Network.newNetwork();
|
||||
network = Network.newNetwork();
|
||||
|
||||
container1 = new GenericContainer<>(DhfsImage.getInstance())
|
||||
.withPrivilegedMode(true)
|
||||
@@ -55,6 +57,7 @@ public class ResyncIT {
|
||||
@AfterEach
|
||||
void stop() {
|
||||
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
|
||||
network.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -72,14 +75,14 @@ public class ResyncIT {
|
||||
var c1curl = container1.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
var c2curl = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
@@ -112,14 +115,14 @@ public class ResyncIT {
|
||||
var c1curl = container1.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
var c2curl = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
@@ -152,14 +155,14 @@ public class ResyncIT {
|
||||
var c1curl = container1.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
var c2curl = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers");
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
@@ -1,5 +1,8 @@
|
||||
package com.usatiuk.kleppmanntree;
|
||||
|
||||
/**
|
||||
* Exception thrown when an attempt is made to create a new tree node as a child with a name that already exists.
|
||||
*/
|
||||
public class AlreadyExistsException extends RuntimeException {
|
||||
public AlreadyExistsException(String message) {
|
||||
super(message);
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
package com.usatiuk.kleppmanntree;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
public class AtomicClock implements Clock<Long>, Serializable {
|
||||
private long _max = 0;
|
||||
|
||||
public AtomicClock(long counter) {
|
||||
_max = counter;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long getTimestamp() {
|
||||
return ++_max;
|
||||
}
|
||||
|
||||
public void setTimestamp(Long timestamp) {
|
||||
_max = timestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long peekTimestamp() {
|
||||
return _max;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long updateTimestamp(Long receivedTimestamp) {
|
||||
var old = _max;
|
||||
_max = Math.max(_max, receivedTimestamp) + 1;
|
||||
return old;
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,26 @@
|
||||
package com.usatiuk.kleppmanntree;
|
||||
|
||||
/**
|
||||
* Clock interface
|
||||
*/
|
||||
public interface Clock<TimestampT extends Comparable<TimestampT>> {
|
||||
/**
|
||||
* Increment and get the current timestamp.
|
||||
* @return the incremented timestamp
|
||||
*/
|
||||
TimestampT getTimestamp();
|
||||
|
||||
/**
|
||||
* Get the current timestamp without incrementing it.
|
||||
* @return the current timestamp
|
||||
*/
|
||||
TimestampT peekTimestamp();
|
||||
|
||||
/**
|
||||
* Update the timestamp with an externally received timestamp.
|
||||
* Will set the currently stored timestamp to <code>max(receivedTimestamp, currentTimestamp) + 1</code>
|
||||
* @param receivedTimestamp the received timestamp
|
||||
* @return the previous timestamp
|
||||
*/
|
||||
TimestampT updateTimestamp(TimestampT receivedTimestamp);
|
||||
}
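For reference, the AtomicClock removed earlier in this changeset is essentially the minimal implementation of this interface. A sketch of such a Lamport-style clock (hypothetical class name, not part of the repository):

// Minimal sketch of a Lamport-style Clock<Long>, mirroring the removed AtomicClock.
public class SimpleLongClock implements Clock<Long> {
    private long _max = 0;

    @Override
    public Long getTimestamp() {
        return ++_max; // increment, then return
    }

    @Override
    public Long peekTimestamp() {
        return _max; // read without incrementing
    }

    @Override
    public Long updateTimestamp(Long receivedTimestamp) {
        var old = _max;
        _max = Math.max(_max, receivedTimestamp) + 1; // merge an externally received timestamp
        return old;
    }
}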
|
||||
|
||||
@@ -3,6 +3,13 @@ package com.usatiuk.kleppmanntree;
|
||||
import java.io.Serializable;
|
||||
import java.util.Comparator;
|
||||
|
||||
/**
|
||||
* CombinedTimestamp is a record that represents a timestamp and a node ID, ordered first by timestamp and then by node ID.
|
||||
* @param timestamp the timestamp
|
||||
* @param nodeId the node ID. If null, then only the timestamp is used for ordering.
|
||||
* @param <TimestampT> the type of the timestamp
|
||||
* @param <PeerIdT> the type of the node ID
|
||||
*/
|
||||
public record CombinedTimestamp<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>>
|
||||
(TimestampT timestamp,
|
||||
PeerIdT nodeId) implements Comparable<CombinedTimestamp<TimestampT, PeerIdT>>, Serializable {
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
package com.usatiuk.kleppmanntree;
|
||||
|
||||
import jakarta.annotation.Nonnull;
|
||||
import jakarta.annotation.Nullable;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.*;
|
||||
@@ -10,6 +8,14 @@ import java.util.function.Function;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
|
||||
/**
|
||||
* An implementation of a tree as described in <a href="https://martin.kleppmann.com/papers/move-op.pdf">A highly-available move operation for replicated trees</a>
|
||||
*
|
||||
* @param <TimestampT> Type of the timestamp
|
||||
* @param <PeerIdT> Type of the peer ID
|
||||
* @param <MetaT> Type of the node metadata
|
||||
* @param <NodeIdT> Type of the node ID
|
||||
*/
|
||||
public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
|
||||
private static final Logger LOGGER = Logger.getLogger(KleppmannTree.class.getName());
|
||||
|
||||
@@ -17,8 +23,15 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
private final PeerInterface<PeerIdT> _peers;
|
||||
private final Clock<TimestampT> _clock;
|
||||
private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
|
||||
private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;
|
||||
|
||||
/**
|
||||
* Constructor with all the dependencies
|
||||
*
|
||||
* @param storage Storage interface
|
||||
* @param peers Peer interface
|
||||
* @param clock Clock interface
|
||||
* @param opRecorder Operation recorder interface
|
||||
*/
|
||||
public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
|
||||
PeerInterface<PeerIdT> peers,
|
||||
Clock<TimestampT> clock,
|
||||
@@ -29,6 +42,13 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
_opRecorder = opRecorder;
|
||||
}
|
||||
|
||||
/**
|
||||
* Traverse the tree from the given node ID using the given list of names
|
||||
*
|
||||
* @param fromId The starting node ID
|
||||
* @param names The list of names to traverse
|
||||
* @return The resulting node ID or null if not found
|
||||
*/
|
||||
private NodeIdT traverseImpl(NodeIdT fromId, List<String> names) {
|
||||
if (names.isEmpty()) return fromId;
|
||||
|
||||
@@ -42,14 +62,21 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
return traverseImpl(childId, names.subList(1, names.size()));
|
||||
}
|
||||
|
||||
public NodeIdT traverse(NodeIdT fromId, List<String> names) {
|
||||
return traverseImpl(fromId, names.subList(1, names.size()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Traverse the tree from its root node using the given list of names
|
||||
*
|
||||
* @param names The list of names to traverse
|
||||
* @return The resulting node ID or null if not found
|
||||
*/
|
||||
public NodeIdT traverse(List<String> names) {
|
||||
return traverseImpl(_storage.getRootId(), names);
|
||||
}
|
||||
|
||||
/**
|
||||
* Undo the effect of a log effect
|
||||
*
|
||||
* @param effect The log effect to undo
|
||||
*/
|
||||
private void undoEffect(LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT> effect) {
|
||||
if (effect.oldInfo() != null) {
|
||||
var node = _storage.getById(effect.childId());
|
||||
@@ -89,10 +116,14 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
node.withParent(null)
|
||||
.withLastEffectiveOp(null)
|
||||
);
|
||||
_undoCtx.put(node.key(), node);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Undo the effects of a log record
|
||||
*
|
||||
* @param op The log record to undo
|
||||
*/
|
||||
private void undoOp(LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
|
||||
LOGGER.finer(() -> "Will undo op: " + op);
|
||||
if (op.effects() != null)
|
||||
@@ -100,16 +131,32 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
undoEffect(e);
|
||||
}
|
||||
|
||||
/**
|
||||
* Redo the operation in a log record
|
||||
*
|
||||
* @param entry The log record to redo
|
||||
*/
|
||||
private void redoOp(Map.Entry<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> entry) {
|
||||
var newEffects = doOp(entry.getValue().op(), false);
|
||||
_storage.getLog().replace(entry.getKey(), newEffects);
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform the operation and put it in the log
|
||||
*
|
||||
* @param op The operation to perform
|
||||
* @param failCreatingIfExists Whether to fail if there is a name conflict,
|
||||
* otherwise replace the existing node
|
||||
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
|
||||
*/
|
||||
private void doAndPut(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
|
||||
var res = doOp(op, failCreatingIfExists);
|
||||
_storage.getLog().put(res.op().timestamp(), res);
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to trim the log to the causality threshold
|
||||
*/
|
||||
private void tryTrimLog() {
|
||||
var log = _storage.getLog();
|
||||
var timeLog = _storage.getPeerTimestampLog();
|
||||
@@ -165,22 +212,52 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Move a node to a new parent with new metadata
|
||||
*
|
||||
* @param newParent The new parent node ID
|
||||
* @param newMeta The new metadata
|
||||
* @param child The child node ID
|
||||
* @throws AlreadyExistsException If a node with the same name already exists under the new parent
|
||||
*/
|
||||
public <LocalMetaT extends MetaT> void move(NodeIdT newParent, LocalMetaT newMeta, NodeIdT child) {
|
||||
move(newParent, newMeta, child, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Move a node to a new parent with new metadata
|
||||
*
|
||||
* @param newParent The new parent node ID
|
||||
* @param newMeta The new metadata
|
||||
* @param child The child node ID
|
||||
* @param failCreatingIfExists Whether to fail if there is a name conflict,
|
||||
* otherwise replace the existing node
|
||||
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
|
||||
*/
|
||||
public void move(NodeIdT newParent, MetaT newMeta, NodeIdT child, boolean failCreatingIfExists) {
|
||||
var createdMove = createMove(newParent, newMeta, child);
|
||||
applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists);
|
||||
_opRecorder.recordOp(createdMove);
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply an external operation from a remote peer
|
||||
*
|
||||
* @param from The peer ID
|
||||
* @param op The operation to apply
|
||||
*/
|
||||
public void applyExternalOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
|
||||
_clock.updateTimestamp(op.timestamp().timestamp());
|
||||
applyOp(from, op, false);
|
||||
}
|
||||
|
||||
// Returns true if the timestamp is newer than what's seen, false otherwise
|
||||
/**
|
||||
* Update the causality threshold timestamp for a peer
|
||||
*
|
||||
* @param from The peer ID
|
||||
* @param newTimestamp The timestamp received from it
|
||||
* @return True if the timestamp was updated, false otherwise
|
||||
*/
|
||||
private boolean updateTimestampImpl(PeerIdT from, TimestampT newTimestamp) {
|
||||
TimestampT oldRef = _storage.getPeerTimestampLog().getForPeer(from);
|
||||
if (oldRef != null && oldRef.compareTo(newTimestamp) >= 0) { // FIXME?
|
||||
@@ -191,6 +268,12 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the causality threshold timestamp for a peer
|
||||
*
|
||||
* @param from The peer ID
|
||||
* @param timestamp The timestamp received from it
|
||||
*/
|
||||
public void updateExternalTimestamp(PeerIdT from, TimestampT timestamp) {
|
||||
var gotExt = _storage.getPeerTimestampLog().getForPeer(from);
|
||||
var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId());
|
||||
@@ -201,6 +284,15 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
tryTrimLog();
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply an operation from a peer
|
||||
*
|
||||
* @param from The peer ID
|
||||
* @param op The operation to apply
|
||||
* @param failCreatingIfExists Whether to fail if there is a name conflict,
|
||||
* otherwise replace the existing node
|
||||
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
|
||||
*/
|
||||
private void applyOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
|
||||
if (!updateTimestampImpl(op.timestamp().nodeId(), op.timestamp().timestamp())) return;
|
||||
|
||||
@@ -217,45 +309,52 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
}
|
||||
assert cmp != 0;
|
||||
if (cmp < 0) {
|
||||
try {
|
||||
if (log.containsKey(op.timestamp())) return;
|
||||
var toUndo = log.newestSlice(op.timestamp(), false);
|
||||
_undoCtx = new HashMap<>();
|
||||
for (var entry : toUndo.reversed()) {
|
||||
undoOp(entry.getValue());
|
||||
}
|
||||
try {
|
||||
doAndPut(op, failCreatingIfExists);
|
||||
} finally {
|
||||
for (var entry : toUndo) {
|
||||
redoOp(entry);
|
||||
}
|
||||
|
||||
if (!_undoCtx.isEmpty()) {
|
||||
for (var e : _undoCtx.entrySet()) {
|
||||
LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
|
||||
_storage.removeNode(e.getKey());
|
||||
}
|
||||
}
|
||||
_undoCtx = null;
|
||||
}
|
||||
} finally {
|
||||
tryTrimLog();
|
||||
if (log.containsKey(op.timestamp())) return;
|
||||
var toUndo = log.newestSlice(op.timestamp(), false);
|
||||
for (var entry : toUndo.reversed()) {
|
||||
undoOp(entry.getValue());
|
||||
}
|
||||
doAndPut(op, failCreatingIfExists);
|
||||
for (var entry : toUndo) {
|
||||
redoOp(entry);
|
||||
}
|
||||
tryTrimLog();
|
||||
} else {
|
||||
doAndPut(op, failCreatingIfExists);
|
||||
tryTrimLog();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a new timestamp, incrementing the one in storage
|
||||
*
|
||||
* @return A new timestamp
|
||||
*/
|
||||
private CombinedTimestamp<TimestampT, PeerIdT> getTimestamp() {
|
||||
return new CombinedTimestamp<>(_clock.getTimestamp(), _peers.getSelfId());
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new move operation
|
||||
*
|
||||
* @param newParent The new parent node ID
|
||||
* @param newMeta The new metadata
|
||||
* @param node The child node ID
|
||||
* @return A new move operation
|
||||
*/
|
||||
private <LocalMetaT extends MetaT> OpMove<TimestampT, PeerIdT, LocalMetaT, NodeIdT> createMove(NodeIdT newParent, LocalMetaT newMeta, NodeIdT node) {
|
||||
return new OpMove<>(getTimestamp(), newParent, newMeta, node);
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform the operation and return the log record
|
||||
*
|
||||
* @param op The operation to perform
|
||||
* @param failCreatingIfExists Whether to fail if there is a name conflict,
|
||||
* otherwise replace the existing node
|
||||
* @return The log record
|
||||
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
|
||||
*/
|
||||
private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> doOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
|
||||
LOGGER.finer(() -> "Doing op: " + op);
|
||||
LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computed;
|
||||
@@ -264,8 +363,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
} catch (AlreadyExistsException aex) {
|
||||
throw aex;
|
||||
} catch (Exception e) {
|
||||
LOGGER.log(Level.SEVERE, "Error computing effects for op" + op.toString(), e);
|
||||
computed = new LogRecord<>(op, null);
|
||||
throw new RuntimeException("Error computing effects for op " + op.toString(), e);
|
||||
}
|
||||
|
||||
if (computed.effects() != null)
|
||||
@@ -273,28 +371,24 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
return computed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a new node from storage
|
||||
*
|
||||
* @param key The node ID
|
||||
* @param parent The parent node ID
|
||||
* @param meta The metadata
|
||||
* @return A new tree node
|
||||
*/
|
||||
private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
|
||||
if (_undoCtx != null) {
|
||||
var node = _undoCtx.get(key);
|
||||
if (node != null) {
|
||||
try {
|
||||
if (!node.children().isEmpty()) {
|
||||
LOGGER.log(Level.WARNING, "Not empty children for undone node " + key);
|
||||
}
|
||||
node = node.withParent(parent).withMeta(meta);
|
||||
} catch (Exception e) {
|
||||
LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
|
||||
node = null;
|
||||
}
|
||||
}
|
||||
if (node != null) {
|
||||
_undoCtx.remove(key);
|
||||
return node;
|
||||
}
|
||||
}
|
||||
return _storage.createNewNode(key, parent, meta);
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply the effects of a log record
|
||||
*
|
||||
* @param sourceOp The source operation
|
||||
* @param effects The list of log effects
|
||||
*/
|
||||
private void applyEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> sourceOp, List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {
|
||||
for (var effect : effects) {
|
||||
LOGGER.finer(() -> "Applying effect: " + effect + " from op " + sourceOp);
|
||||
@@ -335,6 +429,15 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute the effects of a move operation
|
||||
*
|
||||
* @param op The operation to process
|
||||
* @param failCreatingIfExists Whether to fail if there is a name conflict,
|
||||
* otherwise replace the existing node
|
||||
* @return The log record with the computed effects
|
||||
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
|
||||
*/
|
||||
private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computeEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
|
||||
var node = _storage.getById(op.childId());
|
||||
|
||||
@@ -372,10 +475,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
var conflictNode = _storage.getById(conflictNodeId);
|
||||
MetaT conflictNodeMeta = conflictNode.meta();
|
||||
|
||||
if (Objects.equals(conflictNodeMeta, op.newMeta())) {
|
||||
return new LogRecord<>(op, null);
|
||||
}
|
||||
|
||||
LOGGER.finer(() -> "Node creation conflict: " + conflictNode);
|
||||
|
||||
String newConflictNodeName = op.newName() + ".conflict." + conflictNode.key();
|
||||
@@ -400,18 +499,14 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
if (oldMeta != null
|
||||
&& op.newMeta() != null
|
||||
&& !oldMeta.getClass().equals(op.newMeta().getClass())) {
|
||||
LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
|
||||
return new LogRecord<>(op, null);
|
||||
throw new RuntimeException("Class mismatch for meta for node " + node.key());
|
||||
}
|
||||
|
||||
var replaceNodeId = newParent.children().get(op.newName());
|
||||
if (replaceNodeId != null) {
|
||||
var replaceNode = _storage.getById(replaceNodeId);
|
||||
var replaceNodeMeta = replaceNode.meta();
|
||||
|
||||
if (Objects.equals(replaceNodeMeta, op.newMeta())) {
|
||||
return new LogRecord<>(op, null);
|
||||
}
|
||||
|
||||
LOGGER.finer(() -> "Node replacement: " + replaceNode);
|
||||
|
||||
return new LogRecord<>(op, List.of(
|
||||
@@ -426,6 +521,13 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
));
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a node is an ancestor of another node
|
||||
*
|
||||
* @param child The child node ID
|
||||
* @param parent The parent node ID
|
||||
* @return True if the child is an ancestor of the parent, false otherwise
|
||||
*/
|
||||
private boolean isAncestor(NodeIdT child, NodeIdT parent) {
|
||||
var node = _storage.getById(parent);
|
||||
NodeIdT curParent;
|
||||
@@ -436,6 +538,11 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Walk the tree and apply the given consumer to each node
|
||||
*
|
||||
* @param consumer The consumer to apply to each node
|
||||
*/
|
||||
public void walkTree(Consumer<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> consumer) {
|
||||
ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
|
||||
queue.push(_storage.getRootId());
|
||||
@@ -449,6 +556,12 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Find the parent of a node that matches the given predicate
|
||||
*
|
||||
* @param kidPredicate The predicate to match the child node
|
||||
* @return A pair containing the name of the child and the ID of the parent, or null if not found
|
||||
*/
|
||||
public Pair<String, NodeIdT> findParent(Function<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>, Boolean> kidPredicate) {
|
||||
ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
|
||||
queue.push(_storage.getRootId());
|
||||
@@ -469,6 +582,13 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Record the bootstrap operations for a given peer
|
||||
* Will visit all nodes of the tree and add their effective operations to both the queue to be sent to the peer,
|
||||
* and to the global operation log.
|
||||
*
|
||||
* @param host The peer ID
|
||||
*/
|
||||
public void recordBoostrapFor(PeerIdT host) {
|
||||
TreeMap<CombinedTimestamp<TimestampT, PeerIdT>, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT>> result = new TreeMap<>();
|
||||
|
||||
|
||||
@@ -2,6 +2,18 @@ package com.usatiuk.kleppmanntree;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* LogEffect is a record that represents the effect of a log entry on a tree node.
|
||||
* @param oldInfo the old information about the node, before it was moved. Null if the node did not exist before
|
||||
* @param effectiveOp the operation that had caused this effect to be applied
|
||||
* @param newParentId the ID of the new parent node
|
||||
* @param newMeta the new metadata of the node
|
||||
* @param childId the ID of the child node
|
||||
* @param <TimestampT> the type of the timestamp
|
||||
* @param <PeerIdT> the type of the peer ID
|
||||
* @param <MetaT> the type of the node metadata
|
||||
* @param <NodeIdT> the type of the node ID
|
||||
*/
|
||||
public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>(
|
||||
LogEffectOld<TimestampT, PeerIdT, MetaT, NodeIdT> oldInfo,
|
||||
OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> effectiveOp,
|
||||
@@ -10,14 +22,14 @@ public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT exten
|
||||
NodeIdT childId) implements Serializable {
|
||||
public String oldName() {
|
||||
if (oldInfo.oldMeta() != null) {
|
||||
return oldInfo.oldMeta().getName();
|
||||
return oldInfo.oldMeta().name();
|
||||
}
|
||||
return childId.toString();
|
||||
}
|
||||
|
||||
public String newName() {
|
||||
if (newMeta != null) {
|
||||
return newMeta.getName();
|
||||
return newMeta.name();
|
||||
}
|
||||
return childId.toString();
|
||||
}
|
||||
|
||||
@@ -2,6 +2,16 @@ package com.usatiuk.kleppmanntree;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* Represents the old information about a node before it was moved.
|
||||
* @param oldEffectiveMove the old effective move that had caused this effect to be applied
|
||||
* @param oldParent the ID of the old parent node
|
||||
* @param oldMeta the old metadata of the node
|
||||
* @param <TimestampT> the type of the timestamp
|
||||
* @param <PeerIdT> the type of the peer ID
|
||||
* @param <MetaT> the type of the node metadata
|
||||
* @param <NodeIdT> the type of the node ID
|
||||
*/
|
||||
public record LogEffectOld<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
|
||||
(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> oldEffectiveMove,
|
||||
NodeIdT oldParent,
|
||||
|
||||
@@ -4,29 +4,82 @@ import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* LogInterface is an interface that allows accessing the log of operations
|
||||
* @param <TimestampT> the type of the timestamp
|
||||
* @param <PeerIdT> the type of the peer ID
|
||||
* @param <MetaT> the type of the node metadata
|
||||
* @param <NodeIdT> the type of the node ID
|
||||
*/
|
||||
public interface LogInterface<
|
||||
TimestampT extends Comparable<TimestampT>,
|
||||
PeerIdT extends Comparable<PeerIdT>,
|
||||
MetaT extends NodeMeta,
|
||||
NodeIdT> {
|
||||
/**
|
||||
* Peek the oldest log entry.
|
||||
* @return the oldest log entry
|
||||
*/
|
||||
Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekOldest();
|
||||
|
||||
/**
|
||||
* Take the oldest log entry.
|
||||
* @return the oldest log entry
|
||||
*/
|
||||
Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> takeOldest();
|
||||
|
||||
/**
|
||||
* Peek the newest log entry.
|
||||
* @return the newest log entry
|
||||
*/
|
||||
Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekNewest();
|
||||
|
||||
/**
|
||||
* Return all log entries that are newer than the given timestamp.
|
||||
* @param since the timestamp to compare with
|
||||
* @param inclusive if true, include the log entry with the given timestamp
|
||||
* @return a list of log entries that are newer than the given timestamp
|
||||
*/
|
||||
List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>>
|
||||
newestSlice(CombinedTimestamp<TimestampT, PeerIdT> since, boolean inclusive);
|
||||
|
||||
/**
|
||||
* Return all the log entries
|
||||
* @return a list of all log entries
|
||||
*/
|
||||
List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>> getAll();
|
||||
|
||||
/**
|
||||
* Checks if the log is empty.
|
||||
* @return true if the log is empty, false otherwise
|
||||
*/
|
||||
boolean isEmpty();
|
||||
|
||||
/**
|
||||
* Checks if the log contains the given timestamp.
|
||||
* @param timestamp the timestamp to check
|
||||
* @return true if the log contains the given timestamp, false otherwise
|
||||
*/
|
||||
boolean containsKey(CombinedTimestamp<TimestampT, PeerIdT> timestamp);
|
||||
|
||||
/**
|
||||
* Get the size of the log.
|
||||
* @return the size of the log (number of entries)
|
||||
*/
|
||||
long size();
|
||||
|
||||
/**
|
||||
* Add a log entry to the log.
|
||||
* @param timestamp the timestamp of the log entry
|
||||
* @param record the log entry
|
||||
* @throws IllegalStateException if the log entry already exists
|
||||
*/
|
||||
void put(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record);
|
||||
|
||||
/**
|
||||
* Replace a log entry in the log.
|
||||
* @param timestamp the timestamp of the log entry
|
||||
* @param record the log entry
|
||||
*/
|
||||
void replace(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record);
|
||||
}
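For orientation, a purely in-memory implementation of this interface backed by a TreeMap could look like the sketch below (hypothetical class in the same package; the repository's actual log is persisted through the storage layer):

import org.apache.commons.lang3.tuple.Pair;

import java.util.List;
import java.util.TreeMap;

// Hypothetical in-memory LogInterface sketch; illustrative only.
public class InMemoryLog<T extends Comparable<T>, P extends Comparable<P>, M extends NodeMeta, N>
        implements LogInterface<T, P, M, N> {
    private final TreeMap<CombinedTimestamp<T, P>, LogRecord<T, P, M, N>> _log = new TreeMap<>();

    @Override
    public Pair<CombinedTimestamp<T, P>, LogRecord<T, P, M, N>> peekOldest() {
        var e = _log.firstEntry();
        return e == null ? null : Pair.of(e.getKey(), e.getValue());
    }

    @Override
    public Pair<CombinedTimestamp<T, P>, LogRecord<T, P, M, N>> takeOldest() {
        var e = _log.pollFirstEntry();
        return e == null ? null : Pair.of(e.getKey(), e.getValue());
    }

    @Override
    public Pair<CombinedTimestamp<T, P>, LogRecord<T, P, M, N>> peekNewest() {
        var e = _log.lastEntry();
        return e == null ? null : Pair.of(e.getKey(), e.getValue());
    }

    @Override
    public List<Pair<CombinedTimestamp<T, P>, LogRecord<T, P, M, N>>> newestSlice(CombinedTimestamp<T, P> since, boolean inclusive) {
        return _log.tailMap(since, inclusive).entrySet().stream()
                .map(e -> Pair.of(e.getKey(), e.getValue()))
                .toList();
    }

    @Override
    public List<Pair<CombinedTimestamp<T, P>, LogRecord<T, P, M, N>>> getAll() {
        return _log.entrySet().stream()
                .map(e -> Pair.of(e.getKey(), e.getValue()))
                .toList();
    }

    @Override
    public boolean isEmpty() {
        return _log.isEmpty();
    }

    @Override
    public boolean containsKey(CombinedTimestamp<T, P> timestamp) {
        return _log.containsKey(timestamp);
    }

    @Override
    public long size() {
        return _log.size();
    }

    @Override
    public void put(CombinedTimestamp<T, P> timestamp, LogRecord<T, P, M, N> record) {
        if (_log.containsKey(timestamp))
            throw new IllegalStateException("Log entry already exists: " + timestamp);
        _log.put(timestamp, record);
    }

    @Override
    public void replace(CombinedTimestamp<T, P> timestamp, LogRecord<T, P, M, N> record) {
        _log.put(timestamp, record);
    }
}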
|
||||
|
||||
@@ -3,6 +3,15 @@ package com.usatiuk.kleppmanntree;
|
||||
import java.io.Serializable;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Represents a log record in the Kleppmann tree.
|
||||
* @param op the operation that is stored in this log record
|
||||
* @param effects the effects of the operation (resulting moves)
|
||||
* @param <TimestampT> the type of the timestamp
|
||||
* @param <PeerIdT> the type of the peer ID
|
||||
* @param <MetaT> the type of the node metadata
|
||||
* @param <NodeIdT> the type of the node ID
|
||||
*/
|
||||
public record LogRecord<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
|
||||
(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op,
|
||||
List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) implements Serializable {
|
||||
|
||||
@@ -2,8 +2,24 @@ package com.usatiuk.kleppmanntree;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* Represents metadata associated with a node in the Kleppmann tree.
|
||||
* This interface is used to define the metadata that can be associated with nodes in the tree.
|
||||
* Implementations of this interface should provide a name for the node and a method to create a copy of it with a new name.
|
||||
*/
|
||||
public interface NodeMeta extends Serializable {
|
||||
String getName();
|
||||
/**
|
||||
* Returns the name of the node.
|
||||
*
|
||||
* @return the name of the node
|
||||
*/
|
||||
String name();
|
||||
|
||||
/**
|
||||
* Creates a copy of the metadata with a new name.
|
||||
*
|
||||
* @param name the new name for the metadata
|
||||
* @return a new instance of NodeMeta with the specified name
|
||||
*/
|
||||
NodeMeta withName(String name);
|
||||
}
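A minimal implementation carrying only a name can be a simple record; the record's generated name() accessor satisfies the interface (hypothetical class, for illustration):

// Hypothetical NodeMeta implementation holding only a name.
public record SimpleNodeMeta(String name) implements NodeMeta {
    @Override
    public NodeMeta withName(String name) {
        return new SimpleNodeMeta(name);
    }
}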
|
||||
|
||||
@@ -2,12 +2,30 @@ package com.usatiuk.kleppmanntree;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* Operation that moves a child node to a new parent node.
|
||||
*
|
||||
* @param timestamp the timestamp of the operation
|
||||
* @param newParentId the ID of the new parent node
|
||||
* @param newMeta the new metadata of the node, can be null
|
||||
* @param childId the ID of the child node (the node that is being moved)
|
||||
* @param <TimestampT> the type of the timestamp
|
||||
* @param <PeerIdT> the type of the peer ID
|
||||
* @param <MetaT> the type of the node metadata
|
||||
* @param <NodeIdT> the type of the node ID
|
||||
*/
|
||||
public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
|
||||
(CombinedTimestamp<TimestampT, PeerIdT> timestamp, NodeIdT newParentId, MetaT newMeta,
|
||||
NodeIdT childId) implements Serializable {
|
||||
/**
|
||||
* Returns the new name of the node: name extracted from the new metadata if available,
|
||||
* otherwise the child ID converted to string.
|
||||
*
|
||||
* @return the new name of the node
|
||||
*/
|
||||
public String newName() {
|
||||
if (newMeta != null)
|
||||
return newMeta.getName();
|
||||
return newMeta.name();
|
||||
return childId.toString();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,26 @@
|
||||
package com.usatiuk.kleppmanntree;
|
||||
|
||||
/**
|
||||
* Interface to provide recording operations to be sent to peers asynchronously.
|
||||
* @param <TimestampT> the type of the timestamp
|
||||
* @param <PeerIdT> the type of the peer ID
|
||||
* @param <MetaT> the type of the node metadata
|
||||
* @param <NodeIdT> the type of the node ID
|
||||
*/
|
||||
public interface OpRecorder<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
|
||||
/**
|
||||
* Records an operation to be sent to peers asynchronously.
|
||||
* The operation will be sent to all known peers in the system.
|
||||
*
|
||||
* @param op the operation to be recorded
|
||||
*/
|
||||
void recordOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op);
|
||||
|
||||
/**
|
||||
* Records an operation to be sent to a specific peer asynchronously.
|
||||
*
|
||||
* @param peer the ID of the peer to send the operation to
|
||||
* @param op the operation to be recorded
|
||||
*/
|
||||
void recordOpForPeer(PeerIdT peer, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op);
|
||||
}
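As an illustration only, a recorder that merely buffers operations in memory (instead of handing them to the replication machinery) might look like this hypothetical sketch:

import java.util.ArrayList;
import java.util.List;

// Hypothetical OpRecorder that only collects the recorded operations.
public class CollectingOpRecorder<T extends Comparable<T>, P extends Comparable<P>, M extends NodeMeta, N>
        implements OpRecorder<T, P, M, N> {
    private final List<OpMove<T, P, M, N>> _recorded = new ArrayList<>();

    @Override
    public void recordOp(OpMove<T, P, M, N> op) {
        _recorded.add(op); // would normally be queued for every known peer
    }

    @Override
    public void recordOpForPeer(P peer, OpMove<T, P, M, N> op) {
        _recorded.add(op); // would normally be queued for the given peer only
    }
}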
|
||||
|
||||
@@ -2,8 +2,22 @@ package com.usatiuk.kleppmanntree;
|
||||
|
||||
import java.util.Collection;
|
||||
|
||||
/**
|
||||
* Interface providing access to a list of known peers.
|
||||
* @param <PeerIdT> the type of the peer ID
|
||||
*/
|
||||
public interface PeerInterface<PeerIdT extends Comparable<PeerIdT>> {
|
||||
/**
|
||||
* Returns the ID of the current peer.
|
||||
*
|
||||
* @return the ID of the current peer
|
||||
*/
|
||||
PeerIdT getSelfId();
|
||||
|
||||
/**
|
||||
* Returns a collection of all known peers.
|
||||
*
|
||||
* @return a collection of all known peers
|
||||
*/
|
||||
Collection<PeerIdT> getAllPeers();
|
||||
}
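For a statically configured cluster this can be as small as the following sketch (hypothetical; the repository resolves peers dynamically):

import java.util.Collection;
import java.util.UUID;

// Hypothetical PeerInterface over a fixed, preconfigured set of peer UUIDs.
public record StaticPeers(UUID self, Collection<UUID> peers) implements PeerInterface<UUID> {
    @Override
    public UUID getSelfId() {
        return self;
    }

    @Override
    public Collection<UUID> getAllPeers() {
        return peers;
    }
}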
|
||||
|
||||
@@ -1,11 +1,26 @@
|
||||
package com.usatiuk.kleppmanntree;
|
||||
|
||||
/**
|
||||
* Interface providing a map of the newest received timestamps for each peer (causality thresholds).
* If a peer has a timestamp recorded in this map,
* all subsequent messages from that peer are guaranteed to carry a newer timestamp.
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
|
||||
*/
|
||||
public interface PeerTimestampLogInterface<
|
||||
TimestampT extends Comparable<TimestampT>,
|
||||
PeerIdT extends Comparable<PeerIdT>> {
|
||||
|
||||
/**
|
||||
* Get the timestamp for a specific peer.
|
||||
* @param peerId the ID of the peer
|
||||
* @return the timestamp for the peer
|
||||
*/
|
||||
TimestampT getForPeer(PeerIdT peerId);
|
||||
|
||||
/**
* Set the stored timestamp for a specific peer.
* @param peerId the ID of the peer
* @param timestamp the timestamp to record
*/
|
||||
void putForPeer(PeerIdT peerId, TimestampT timestamp);
|
||||
|
||||
}
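A trivial in-memory version of this causality-threshold map might look like the following hypothetical sketch:

import java.util.HashMap;
import java.util.Map;

// Hypothetical HashMap-backed causality-threshold log.
public class InMemoryPeerTimestampLog<T extends Comparable<T>, P extends Comparable<P>>
        implements PeerTimestampLogInterface<T, P> {
    private final Map<P, T> _timestamps = new HashMap<>();

    @Override
    public T getForPeer(P peerId) {
        return _timestamps.get(peerId); // null if nothing has been recorded yet
    }

    @Override
    public void putForPeer(P peerId, T timestamp) {
        _timestamps.put(peerId, timestamp);
    }
}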
|
||||
|
||||
@@ -1,28 +1,89 @@
|
||||
package com.usatiuk.kleppmanntree;
|
||||
|
||||
/**
|
||||
* Storage interface for the Kleppmann tree.
|
||||
*
|
||||
* @param <TimestampT> the type of the timestamp
|
||||
* @param <PeerIdT> the type of the peer ID
|
||||
* @param <MetaT> the type of the node metadata
|
||||
* @param <NodeIdT> the type of the node ID
|
||||
*/
|
||||
public interface StorageInterface<
|
||||
TimestampT extends Comparable<TimestampT>,
|
||||
PeerIdT extends Comparable<PeerIdT>,
|
||||
MetaT extends NodeMeta,
|
||||
NodeIdT> {
|
||||
/**
|
||||
* Get the root node ID.
|
||||
*
|
||||
* @return the root node ID
|
||||
*/
|
||||
NodeIdT getRootId();
|
||||
|
||||
/**
|
||||
* Get the trash node ID.
|
||||
*
|
||||
* @return the trash node ID
|
||||
*/
|
||||
NodeIdT getTrashId();
|
||||
|
||||
/**
|
||||
* Get the lost and found node ID.
|
||||
*
|
||||
* @return the lost and found node ID
|
||||
*/
|
||||
NodeIdT getLostFoundId();
|
||||
|
||||
/**
|
||||
* Generate a new node ID.
*
* @return a new, unique node ID
|
||||
*/
|
||||
NodeIdT getNewNodeId();
|
||||
|
||||
/**
|
||||
* Get the node by its ID.
|
||||
*
|
||||
* @param id the ID of the node
|
||||
* @return the node with the specified ID, or null if not found
|
||||
*/
|
||||
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getById(NodeIdT id);
|
||||
|
||||
// Creates a node, returned wrapper is RW-locked
|
||||
/**
|
||||
* Create a new node with the specified key, parent, and metadata.
|
||||
*
|
||||
* @param key the ID of the new node
|
||||
* @param parent the ID of the parent node
|
||||
* @param meta the metadata of the new node
|
||||
* @return the new node
|
||||
*/
|
||||
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> createNewNode(NodeIdT key, NodeIdT parent, MetaT meta);
|
||||
|
||||
/**
|
||||
* Put a node into the storage.
|
||||
*
|
||||
* @param node the node to put into the storage
|
||||
*/
|
||||
void putNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node);
|
||||
|
||||
/**
|
||||
* Remove a node from the storage.
|
||||
*
|
||||
* @param id the ID of the node to remove
|
||||
*/
|
||||
void removeNode(NodeIdT id);
|
||||
|
||||
/**
|
||||
* Get the log interface.
|
||||
*
|
||||
* @return the log interface
|
||||
*/
|
||||
LogInterface<TimestampT, PeerIdT, MetaT, NodeIdT> getLog();
|
||||
|
||||
/**
|
||||
* Get the peer timestamp log interface.
|
||||
*
|
||||
* @return the peer timestamp log interface
|
||||
*/
|
||||
PeerTimestampLogInterface<TimestampT, PeerIdT> getPeerTimestampLog();
|
||||
}
|
||||
|
||||
@@ -5,29 +5,92 @@ import org.pcollections.PMap;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* Represents a node in the Kleppmann tree.
|
||||
*
|
||||
* @param <TimestampT> the type of the timestamp
|
||||
* @param <PeerIdT> the type of the peer ID
|
||||
* @param <MetaT> the type of the node metadata
|
||||
* @param <NodeIdT> the type of the node ID
|
||||
*/
|
||||
public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> extends Serializable {
|
||||
/**
|
||||
* Get the ID of the node.
|
||||
*
|
||||
* @return the ID of the node
|
||||
*/
|
||||
NodeIdT key();
|
||||
|
||||
/**
|
||||
* Get the ID of the parent node.
|
||||
*
|
||||
* @return the ID of the parent node
|
||||
*/
|
||||
NodeIdT parent();
|
||||
|
||||
/**
|
||||
* Get the last effective operation that moved this node.
|
||||
*
|
||||
* @return the last effective operation
|
||||
*/
|
||||
OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp();
|
||||
|
||||
/**
|
||||
* Get the metadata stored in this node.
|
||||
*
|
||||
* @return the metadata of the node
|
||||
*/
|
||||
@Nullable
|
||||
MetaT meta();
|
||||
|
||||
/**
|
||||
* Get the name of the node.
|
||||
* If the node has metadata, the name is extracted from it, otherwise the key is converted to string.
|
||||
*
|
||||
* @return the name of the node
|
||||
*/
|
||||
default String name() {
|
||||
var meta = meta();
|
||||
if (meta != null) return meta.getName();
|
||||
if (meta != null) return meta.name();
|
||||
return key().toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the children of this node.
|
||||
*
|
||||
* @return a map of child names to their node IDs
|
||||
*/
|
||||
PMap<String, NodeIdT> children();
|
||||
|
||||
/**
|
||||
* Make a copy of this node with a new parent.
|
||||
*
|
||||
* @param parent the ID of the new parent node
|
||||
* @return a new TreeNode instance with the updated parent
|
||||
*/
|
||||
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withParent(NodeIdT parent);
|
||||
|
||||
/**
|
||||
* Make a copy of this node with a new last effective operation.
|
||||
*
|
||||
* @param lastEffectiveOp the new last effective operation
|
||||
* @return a new TreeNode instance with the updated last effective operation
|
||||
*/
|
||||
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withLastEffectiveOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp);
|
||||
|
||||
/**
|
||||
* Make a copy of this node with new metadata.
|
||||
*
|
||||
* @param meta the new metadata
|
||||
* @return a new TreeNode instance with the updated metadata
|
||||
*/
|
||||
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withMeta(MetaT meta);
|
||||
|
||||
/**
|
||||
* Make a copy of this node with new children.
|
||||
*
|
||||
* @param children the new children
|
||||
* @return a new TreeNode instance with the updated children
|
||||
*/
|
||||
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withChildren(PMap<String, NodeIdT> children);
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ public abstract class TestNodeMeta implements NodeMeta {
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
public String name() {
|
||||
return _name;
|
||||
}
|
||||
|
||||
|
||||
@@ -2,9 +2,20 @@ package com.usatiuk.objects;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* JData is a marker interface for all objects that can be stored in the object store.
|
||||
*/
|
||||
public interface JData extends Serializable {
|
||||
/**
|
||||
* Returns the key of the object.
|
||||
* @return the key of the object
|
||||
*/
|
||||
JObjectKey key();
|
||||
|
||||
/**
|
||||
* Returns the estimated size of the object in bytes.
|
||||
* @return the estimated size of the object in bytes
|
||||
*/
|
||||
default int estimateSize() {
|
||||
return 100;
|
||||
}
|
||||
|
||||
@@ -1,9 +1,35 @@
|
||||
package com.usatiuk.objects;
|
||||
|
||||
public sealed interface JDataVersionedWrapper permits JDataVersionedWrapperLazy, JDataVersionedWrapperImpl {
|
||||
import com.usatiuk.objects.iterators.Data;
|
||||
|
||||
/**
|
||||
* JDataVersionedWrapper is a wrapper for JData that contains its version number
|
||||
* (the ID of the transaction that last modified it)
|
||||
*/
|
||||
public sealed interface JDataVersionedWrapper extends Data<JDataVersionedWrapper> permits JDataVersionedWrapperLazy, JDataVersionedWrapperImpl {
|
||||
@Override
|
||||
default JDataVersionedWrapper value() {
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the wrapped object.
|
||||
*
|
||||
* @return the wrapped object
|
||||
*/
|
||||
JData data();
|
||||
|
||||
/**
|
||||
* Returns the version number of the object.
|
||||
*
|
||||
* @return the version number of the object
|
||||
*/
|
||||
long version();
|
||||
|
||||
/**
|
||||
* Returns the estimated size of the object in bytes.
|
||||
*
|
||||
* @return the estimated size of the object in bytes
|
||||
*/
|
||||
int estimateSize();
|
||||
}
|
||||
|
||||
@@ -4,6 +4,9 @@ import jakarta.annotation.Nonnull;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* Simple wrapper for an already-existing JData object with a version.
|
||||
*/
|
||||
public record JDataVersionedWrapperImpl(@Nonnull JData data,
|
||||
long version) implements Serializable, JDataVersionedWrapper {
|
||||
@Override
|
||||
|
||||
@@ -2,18 +2,35 @@ package com.usatiuk.objects;
|
||||
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* Lazy JDataVersionedWrapper implementation.
|
||||
* The object is deserialized only when data() is called for the first time.
|
||||
* Also allows setting a callback to be called when the data is loaded (e.g. to cache it).
|
||||
*/
|
||||
public final class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
|
||||
private JData _data;
|
||||
private final long _version;
|
||||
private final int _estimatedSize;
|
||||
private JData _data;
|
||||
private Supplier<JData> _producer;
|
||||
|
||||
/**
|
||||
* Creates a new JDataVersionedWrapperLazy object.
|
||||
*
|
||||
* @param version the version number of the object
|
||||
* @param estimatedSize the estimated size of the object in bytes
|
||||
* @param producer a supplier that produces the wrapped object
|
||||
*/
|
||||
public JDataVersionedWrapperLazy(long version, int estimatedSize, Supplier<JData> producer) {
|
||||
_version = version;
|
||||
_estimatedSize = estimatedSize;
|
||||
_producer = producer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set a callback to be called when the data is loaded.
|
||||
*
|
||||
* @param cacheCallback the callback to be called
|
||||
*/
|
||||
public void setCacheCallback(Runnable cacheCallback) {
|
||||
if (_data != null) {
|
||||
throw new IllegalStateException("Cache callback can be set only before data is loaded");
|
||||
|
||||
@@ -7,12 +7,21 @@ import jakarta.inject.Singleton;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
/**
|
||||
* Serializer for JDataVersionedWrapper objects.
|
||||
* The objects are stored in a simple format: first the version as an 8-byte long, then the serialized object.
|
||||
*/
|
||||
@Singleton
|
||||
public class JDataVersionedWrapperSerializer implements ObjectSerializer<JDataVersionedWrapper> {
|
||||
public class JDataVersionedWrapperSerializer {
|
||||
@Inject
|
||||
ObjectSerializer<JData> dataSerializer;
|
||||
|
||||
@Override
|
||||
/**
|
||||
* Serializes a JDataVersionedWrapper object to a ByteString.
|
||||
*
|
||||
* @param obj the object to serialize
|
||||
* @return the serialized object as a ByteString
|
||||
*/
|
||||
public ByteString serialize(JDataVersionedWrapper obj) {
|
||||
ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
|
||||
buffer.putLong(obj.version());
|
||||
@@ -20,10 +29,17 @@ public class JDataVersionedWrapperSerializer implements ObjectSerializer<JDataVe
|
||||
return ByteString.copyFrom(buffer).concat(dataSerializer.serialize(obj.data()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public JDataVersionedWrapper deserialize(ByteString data) {
|
||||
var version = data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
|
||||
var rawData = data.substring(Long.BYTES);
|
||||
return new JDataVersionedWrapperLazy(version, rawData.size(), () -> dataSerializer.deserialize(rawData));
|
||||
/**
|
||||
* Deserializes a JDataVersionedWrapper object from a ByteBuffer.
|
||||
* Returns a lazy wrapper (JDataVersionedWrapperLazy).
|
||||
*
|
||||
* @param data the ByteBuffer containing the serialized object
|
||||
* @return the deserialized object
|
||||
*/
|
||||
public JDataVersionedWrapper deserialize(ByteBuffer data) {
|
||||
var version = data.getLong();
|
||||
return new JDataVersionedWrapperLazy(version, data.remaining(),
|
||||
() -> dataSerializer.deserialize(data)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,30 +5,68 @@ import java.nio.ByteBuffer;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.UUID;
|
||||
|
||||
/**
|
||||
* JObjectKey is an interface for object keys to be used in the object store.
|
||||
*/
|
||||
public sealed interface JObjectKey extends Serializable, Comparable<JObjectKey> permits JObjectKeyImpl, JObjectKeyMax, JObjectKeyMin {
|
||||
JObjectKeyMin MIN = new JObjectKeyMin();
|
||||
JObjectKeyMax MAX = new JObjectKeyMax();
|
||||
|
||||
/**
|
||||
* Creates a new JObjectKey from a string value.
|
||||
*
|
||||
* @param value the string value of the key
|
||||
* @return a new JObjectKey
|
||||
*/
|
||||
static JObjectKey of(String value) {
|
||||
return new JObjectKeyImpl(value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new JObjectKey with a random UUID.
|
||||
*
|
||||
* @return a new JObjectKey with a random UUID
|
||||
*/
|
||||
static JObjectKey random() {
|
||||
return new JObjectKeyImpl(UUID.randomUUID().toString());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a JObjectKey that compares less than all other keys.
|
||||
* Calling value on this key will result in an exception.
|
||||
*
|
||||
* @return a JObjectKey that compares less than all other keys
|
||||
*/
|
||||
static JObjectKey first() {
|
||||
return MIN;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a JObjectKey that compares greater than all other keys.
|
||||
* Calling value on this key will result in an exception.
|
||||
*
|
||||
* @return a JObjectKey that compares greater than all other keys
|
||||
*/
|
||||
static JObjectKey last() {
|
||||
return MAX;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new JObjectKey from a byte array.
|
||||
*
|
||||
* @param bytes the byte array representing the key
|
||||
* @return a new JObjectKey
|
||||
*/
|
||||
static JObjectKey fromBytes(byte[] bytes) {
|
||||
return new JObjectKeyImpl(new String(bytes, StandardCharsets.ISO_8859_1));
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new JObjectKey from a ByteBuffer.
|
||||
*
|
||||
* @param buff the ByteBuffer representing the key
|
||||
* @return a new JObjectKey
|
||||
*/
|
||||
static JObjectKey fromByteBuffer(ByteBuffer buff) {
|
||||
byte[] bytes = new byte[buff.remaining()];
|
||||
buff.get(bytes);
|
||||
@@ -41,7 +79,17 @@ public sealed interface JObjectKey extends Serializable, Comparable<JObjectKey>
|
||||
@Override
|
||||
String toString();
|
||||
|
||||
/**
|
||||
* Returns the byte buffer representation of the key.
|
||||
*
|
||||
* @return the byte buffer representation of the key
|
||||
*/
|
||||
ByteBuffer toByteBuffer();
|
||||
|
||||
/**
|
||||
* Returns the string value of the key.
|
||||
*
|
||||
* @return the string value of the key
|
||||
*/
|
||||
String value();
|
||||
}
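Typical usage of the factory methods above, as a sketch (hypothetical example class, assumed to live next to JObjectKey):

import java.nio.charset.StandardCharsets;

// Hypothetical usage sketch for JObjectKey.
class JObjectKeyExample {
    public static void main(String[] args) {
        JObjectKey named = JObjectKey.of("files/readme.txt"); // key from a string
        JObjectKey fresh = JObjectKey.random();               // key from a random UUID

        // MIN/MAX compare below/above every real key, so they work as open range bounds:
        assert JObjectKey.first().compareTo(named) < 0;
        assert JObjectKey.last().compareTo(fresh) > 0;

        // Keys can also be rebuilt from their raw byte form:
        JObjectKey copy = JObjectKey.fromBytes("files/readme.txt".getBytes(StandardCharsets.ISO_8859_1));
        assert copy.value().equals(named.value());
    }
}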
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
package com.usatiuk.objects;
|
||||
|
||||
import com.usatiuk.utils.UninitializedByteBuffer;
|
||||
|
||||
import java.io.Serial;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A "real" implementation of JObjectKey, containing an underlying string, and a cached lazily created byte buffer.
|
||||
*/
|
||||
public final class JObjectKeyImpl implements JObjectKey {
|
||||
@Serial
|
||||
private static final long serialVersionUID = 0L;
|
||||
@@ -46,7 +51,7 @@ public final class JObjectKeyImpl implements JObjectKey {
|
||||
synchronized (this) {
|
||||
if (_bb != null) return _bb;
|
||||
var bytes = value.getBytes(StandardCharsets.ISO_8859_1);
|
||||
var directBb = ByteBuffer.allocateDirect(bytes.length);
|
||||
var directBb = UninitializedByteBuffer.allocate(bytes.length);
|
||||
directBb.put(bytes);
|
||||
directBb.flip();
|
||||
_bb = directBb;
|
||||
@@ -69,7 +74,7 @@ public final class JObjectKeyImpl implements JObjectKey {
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(value);
|
||||
return value.hashCode();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -2,6 +2,9 @@ package com.usatiuk.objects;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
/**
|
||||
* JObjectKey implementation that compares greater than all other keys.
|
||||
*/
|
||||
public record JObjectKeyMax() implements JObjectKey {
|
||||
@Override
|
||||
public int compareTo(JObjectKey o) {
|
||||
|
||||
@@ -2,6 +2,9 @@ package com.usatiuk.objects;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
/**
|
||||
* JObjectKey implementation that compares less than all other keys.
|
||||
*/
|
||||
public record JObjectKeyMin() implements JObjectKey {
|
||||
@Override
|
||||
public int compareTo(JObjectKey o) {
|
||||
|
||||
@@ -2,12 +2,17 @@ package com.usatiuk.objects;
|
||||
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.usatiuk.dhfs.utils.SerializationHelper;
|
||||
import com.google.protobuf.UnsafeByteOperations;
|
||||
import com.usatiuk.utils.SerializationHelper;
|
||||
import io.quarkus.arc.DefaultBean;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
/**
|
||||
* Simple Java object serializer.
|
||||
*/
|
||||
@ApplicationScoped
|
||||
@DefaultBean
|
||||
public class JavaDataSerializer implements ObjectSerializer<JData> {
|
||||
@@ -16,9 +21,8 @@ public class JavaDataSerializer implements ObjectSerializer<JData> {
|
||||
return SerializationHelper.serialize(obj);
|
||||
}
|
||||
|
||||
@Override
|
||||
public JData deserialize(ByteString data) {
|
||||
try (var is = data.newInput()) {
|
||||
public JData deserialize(ByteBuffer data) {
|
||||
try (var is = UnsafeByteOperations.unsafeWrap(data).newInput()) {
|
||||
return SerializationHelper.deserialize(is);
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
|
||||
@@ -2,8 +2,27 @@ package com.usatiuk.objects;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
/**
|
||||
* Interface for serializing and deserializing objects.
|
||||
*
|
||||
* @param <T> the type of object to serialize/deserialize
|
||||
*/
|
||||
public interface ObjectSerializer<T> {
|
||||
/**
|
||||
* Serialize an object to a ByteString.
|
||||
*
|
||||
* @param obj the object to serialize
|
||||
* @return the serialized object as a ByteString
|
||||
*/
|
||||
ByteString serialize(T obj);
|
||||
|
||||
T deserialize(ByteString data);
|
||||
/**
|
||||
* Deserialize an object from a ByteBuffer.
|
||||
*
|
||||
* @param data the ByteBuffer containing the serialized object
|
||||
* @return the deserialized object
|
||||
*/
|
||||
T deserialize(ByteBuffer data);
|
||||
}
|
||||
|
||||
@@ -1,24 +1,70 @@
package com.usatiuk.objects.iterators;

import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Iterator;

public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends Iterator<Pair<K, V>>, AutoCloseableNoThrow {

/**
 * An iterator over key-value pairs that can be closed and supports peek and skip operations, in both directions.
 * @param <K> the type of the keys
 * @param <V> the type of the values
 */
public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends Iterator<Pair<K, V>>, AutoCloseable {
    /**
     * Returns the upcoming key in the forward direction without advancing the iterator.
     *
     * @return the current key
     * @throws IllegalStateException if there is no next element
     */
    K peekNextKey();

    /**
     * Skips the next element in the forward direction.
     *
     * @throws IllegalStateException if there is no next element
     */
    void skip();

    /**
     * Returns the upcoming key in the reverse direction without advancing the iterator.
     *
     * @return the previous key
     * @throws IllegalStateException if there is no previous element
     */
    K peekPrevKey();

    /**
     * Returns the key-value pair in the reverse direction, and advances the iterator.
     *
     * @return the previous key-value pair
     * @throws IllegalStateException if there is no previous element
     */
    Pair<K, V> prev();

    /**
     * Checks if there is a previous element in the reverse direction.
     *
     * @return true if there is a previous element, false otherwise
     */
    boolean hasPrev();

    /**
     * Skips the previous element in the reverse direction.
     *
     * @throws IllegalStateException if there is no previous element
     */
    void skipPrev();

    /**
     * Returns a reversed iterator that iterates in the reverse direction.
     *
     * @return a new CloseableKvIterator that iterates in the reverse direction
     */
    default CloseableKvIterator<K, V> reversed() {
        return new ReversedKvIterator<K, V>(this);
    }

    @Override
    void close();
}

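A short editor-added usage sketch of the documented interface (not repository code; the iterator is assumed to come from one of the store's iterator factories):

import org.apache.commons.lang3.tuple.Pair;

public class IteratorWalkDemo {
    // Walks forward with peek-then-consume access, then back with the reverse-direction methods.
    static <K extends Comparable<K>, V> void dump(CloseableKvIterator<K, V> it) {
        try (it) {
            while (it.hasNext()) {
                K upcoming = it.peekNextKey(); // look at the key without consuming it
                Pair<K, V> entry = it.next();  // now consume it
                System.out.println(upcoming + " -> " + entry.getValue());
            }
            while (it.hasPrev()) {             // walk back towards the beginning
                System.out.println("back: " + it.prev().getKey());
            }
        }
    }
}
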
@@ -1,10 +1,13 @@
package com.usatiuk.objects.iterators;

import java.util.Optional;

public record Data<V>(V value) implements MaybeTombstone<V> {
    @Override
    public Optional<V> opt() {
        return Optional.of(value);
    }
/**
 * Interface indicating that data is present.
 * @param <V> the type of the value
 */
public interface Data<V> extends MaybeTombstone<V> {
    /**
     * Get the value.
     * @return the value
     */
    V value();
}

@@ -0,0 +1,9 @@
package com.usatiuk.objects.iterators;

/**
 * Simple implementation of the Data interface.
 * @param value the value
 * @param <V> the type of the value
 */
public record DataWrapper<V>(V value) implements Data<V> {
}
@@ -1,6 +0,0 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
@FunctionalInterface
|
||||
public interface IterProdFn<K extends Comparable<K>, V> {
|
||||
CloseableKvIterator<K, V> get(IteratorStart start, K key);
|
||||
}
|
||||
@@ -1,5 +1,8 @@
package com.usatiuk.objects.iterators;

/**
 * Specifies the initial positioning of the iterator relative to the requested key.
 */
public enum IteratorStart {
    LT,
    LE,

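As a hypothetical editor-added illustration (the keys below are invented; only the start modes themselves come from the enum and the assertions further down), requesting an iterator with startKey = 20 over the keys 10, 20, 30 would position it roughly as follows:

// Hypothetical illustration, keys 10, 20, 30 invented for this sketch.
// Requesting an iterator with startKey = 20:
//   GE -> positioned at 20 (a key >= 20)
//   GT -> positioned at 30 (a key >  20)
//   LE -> positioned at 20 (a key <= 20)
//   LT -> positioned at 10 (a key <  20)
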
@@ -5,11 +5,25 @@ import org.apache.commons.lang3.tuple.Pair;
import java.util.NoSuchElementException;
import java.util.function.Function;

/**
 * A key-value iterator that filters keys based on a predicate.
 *
 * @param <K> the type of the keys
 * @param <V> the type of the values
 */
public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
    private final CloseableKvIterator<K, V> _backing;
    private final Function<K, Boolean> _filter;
    private K _next;

    /**
     * Constructs a KeyPredicateKvIterator with the specified backing iterator, start position, and filter.
     *
     * @param backing  the backing iterator
     * @param start    the starting position relative to the startKey
     * @param startKey the starting key
     * @param filter   the filter function to apply to keys. Only keys for which this function returns true will be included in the iteration.
     */
    public KeyPredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<K, Boolean> filter) {
        _goingForward = true;
        _backing = backing;

@@ -39,20 +53,20 @@ public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
    }

        switch (start) {
            case LT -> {
//                assert _next == null || _next.getKey().compareTo(startKey) < 0;
            }
            case LE -> {
//                assert _next == null || _next.getKey().compareTo(startKey) <= 0;
            }
            case GT -> {
                assert _next == null || _next.compareTo(startKey) > 0;
            }
            case GE -> {
                assert _next == null || _next.compareTo(startKey) >= 0;
            }
        }
//        switch (start) {
//            case LT -> {
////                assert _next == null || _next.getKey().compareTo(startKey) < 0;
//            }
//            case LE -> {
////                assert _next == null || _next.getKey().compareTo(startKey) <= 0;
//            }
//            case GT -> {
//                assert _next == null || _next.compareTo(startKey) > 0;
//            }
//            case GE -> {
//                assert _next == null || _next.compareTo(startKey) >= 0;
//            }
//        }
    }

    private void fillNext() {

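A hypothetical editor-added usage sketch (not repository code): the wrapper surfaces only the keys accepted by the predicate, here a made-up prefix check over JObjectKey's string form; the backing iterator is assumed to come from the underlying store.

import java.util.function.Function;

public class KeyPredicateDemo {
    static CloseableKvIterator<JObjectKey, JData> onlyWithPrefix(
            CloseableKvIterator<JObjectKey, JData> backing, JObjectKey startKey, String prefix) {
        // Only keys whose string form starts with the prefix are surfaced.
        Function<JObjectKey, Boolean> filter = k -> k.toString().startsWith(prefix);
        // Start at the first accepted key >= startKey.
        return new KeyPredicateKvIterator<>(backing, IteratorStart.GE, startKey, filter);
    }
}
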
@@ -4,10 +4,23 @@ import org.apache.commons.lang3.tuple.Pair;

import java.util.function.Function;

/**
 * A mapping key-value iterator that transforms the values of a backing iterator using a specified function.
 *
 * @param <K>   the type of the keys
 * @param <V>   the type of the values in the backing iterator
 * @param <V_T> the type of the transformed values
 */
public class MappingKvIterator<K extends Comparable<K>, V, V_T> implements CloseableKvIterator<K, V_T> {
    private final CloseableKvIterator<K, V> _backing;
    private final Function<V, V_T> _transformer;

    /**
     * Constructs a MappingKvIterator with the specified backing iterator and transformer function.
     *
     * @param backing     the backing iterator
     * @param transformer the function to transform values
     */
    public MappingKvIterator(CloseableKvIterator<K, V> backing, Function<V, V_T> transformer) {
        _backing = backing;
        _transformer = transformer;

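A hypothetical editor-added usage sketch (not repository code): the wrapper exposes each value of a backing iterator through a transforming function, here viewing byte-array values as their lengths.

import java.util.function.Function;

public class MappingDemo {
    static <K extends Comparable<K>> CloseableKvIterator<K, Integer> sizes(
            CloseableKvIterator<K, byte[]> backing) {
        // Values are transformed by the function as entries are read; keys are untouched.
        Function<byte[], Integer> toLength = bytes -> bytes.length;
        return new MappingKvIterator<>(backing, toLength);
    }
}
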
@@ -1,7 +1,8 @@
package com.usatiuk.objects.iterators;

import java.util.Optional;

/**
 * Optional-like interface: an instance is either a {@link Data} or a {@link Tombstone}.
 * @param <T> the type of the value
 */
public interface MaybeTombstone<T> {
    Optional<T> opt();
}

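A hypothetical editor-added sketch (not repository code) of how the Data / Tombstone split can be consumed: DataWrapper is the record added above, while the Tombstone counterpart is only referenced by the Javadoc, so its exact shape is assumed.

public class MaybeTombstoneDemo {
    // Resolves an entry to a plain value, treating a tombstone as "deleted".
    static <V> V orNull(MaybeTombstone<V> entry) {
        if (entry instanceof Data<V> data) {
            return data.value(); // data is present
        }
        return null;             // otherwise a tombstone, i.e. the key was deleted
    }

    public static void main(String[] args) {
        MaybeTombstone<String> present = new DataWrapper<>("hello");
        System.out.println(orNull(present)); // hello
    }
}
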
Some files were not shown because too many files have changed in this diff.