1 Commit

4bd7266c89 (2025-04-13 11:57:22 +02:00)
don't try to read objects we know don't exist when committing
such as new chunks with random ids

404 changed files with 7626 additions and 9617 deletions

.dockerignore (new file, 4 lines)

@@ -0,0 +1,4 @@
**/.parcel-cache
**/dist
**/node_modules
**/target


@@ -7,6 +7,12 @@ on:
pull_request:
branches: ["main"]
env:
# Use docker.io for Docker Hub if empty
REGISTRY: ghcr.io
# github.repository as <account>/<repo>
IMAGE_NAME: ${{ github.repository }}
jobs:
build-dhfs:
runs-on: ubuntu-latest
@@ -14,21 +20,26 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: "recursive"
- name: Install sudo for ACT
run: apt-get update && apt-get install -y sudo
if: env.ACT=='true'
- name: Install FUSE
run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
- name: Install fuse and maven
run: sudo apt-get update && sudo apt-get install -y libfuse2
- name: User allow other for fuse
run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf
- name: Download maven
run: |
cd "$HOME"
mkdir maven-bin
curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
echo "$HOME"/maven-bin/bin >> $GITHUB_PATH
- name: Dump fuse.conf
run: cat /etc/fuse.conf
- name: Maven info
run: |
echo $GITHUB_PATH
echo $PATH
mvn -v
- name: Set up JDK 21
uses: actions/setup-java@v4
@@ -37,21 +48,16 @@ jobs:
distribution: "zulu"
cache: maven
- name: Build LazyFS
run: cd thirdparty/lazyfs/ && ./build.sh
- name: Test with Maven
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify javadoc:aggregate
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify
# - name: Build with Maven
# run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG
- uses: actions/upload-artifact@v4
with:
name: DHFS Server Package
path: dhfs-parent/dhfs-fuse/target/quarkus-app
- uses: actions/upload-artifact@v4
with:
name: DHFS Javadocs
path: dhfs-parent/target/reports/apidocs/
path: dhfs-parent/server/target/quarkus-app
- uses: actions/upload-artifact@v4
if: ${{ always() }}
@@ -83,12 +89,211 @@ jobs:
name: Webui
path: webui/dist
build-native-libs:
strategy:
matrix:
include:
- os: ubuntu-latest
cross: "linux/amd64"
- os: ubuntu-latest
cross: "linux/arm64"
- os: macos-latest
runs-on: ${{ matrix.os }}
env:
DO_LOCAL_BUILD: ${{ matrix.os == 'macos-latest' }}
DOCKER_PLATFORM: ${{ matrix.cross || 'NATIVE' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set SANITIZED_DOCKER_PLATFORM
run: echo "SANITIZED_DOCKER_PLATFORM=$(echo $DOCKER_PLATFORM | tr / _ )" >> $GITHUB_ENV
- name: Set DOCKER_BUILDER_IMAGE
run: echo "DOCKER_BUILDER_IMAGE=dhfs_lib_builder-${{matrix.os}}-$SANITIZED_DOCKER_PLATFORM" >> $GITHUB_ENV
- name: Build config
run: |
echo DO_LOCAL_BUILD: $DO_LOCAL_BUILD
echo DOCKER_PLATFORM: $DOCKER_PLATFORM
echo SANITIZED_DOCKER_PLATFORM: $SANITIZED_DOCKER_PLATFORM
echo DOCKER_BUILDER_IMAGE: $DOCKER_BUILDER_IMAGE
- name: Set up JDK 21
if: ${{ env.DO_LOCAL_BUILD == 'TRUE' }}
uses: actions/setup-java@v4
with:
java-version: "21"
distribution: "zulu"
cache: maven
- name: Set up Docker Buildx
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/setup-buildx-action@v3
- name: Set up QEMU
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/setup-qemu-action@v3
- name: Build Docker builder image
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/build-push-action@v5
with:
context: ./libdhfs_support/builder
file: ./libdhfs_support/builder/Dockerfile
push: false
platforms: ${{ env.DOCKER_PLATFORM }}
tags: ${{ env.DOCKER_BUILDER_IMAGE }}
cache-from: type=gha,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
cache-to: type=gha,mode=max,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
load: true
- name: Build the library
run: |
CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" libdhfs_support/builder/cross-build.sh both build "$(pwd)/result"
- name: Upload build
uses: actions/upload-artifact@v4
with:
name: NativeLib-${{ matrix.os }}-${{ env.SANITIZED_DOCKER_PLATFORM }}
path: result
merge-native-libs:
runs-on: ubuntu-latest
needs: [build-native-libs]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: downloaded-libs
- name: Merge all
run: rsync -av downloaded-libs/NativeLib*/* result/
- name: Check that libs exists
run: |
test -f "result/Linux-x86_64/libdhfs_support.so" || exit 1
- name: Upload
uses: actions/upload-artifact@v4
with:
name: NativeLibs
path: result
publish-docker:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
# This is used to complete the identity challenge
# with sigstore/fulcio when running outside of PRs.
id-token: write
needs: [build-webui, merge-native-libs, build-dhfs]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Download server package
uses: actions/download-artifact@v4
with:
name: DHFS Server Package
path: dhfs-package-downloaded
- name: Download webui
uses: actions/download-artifact@v4
with:
name: Webui
path: webui-dist-downloaded
- name: Download native libs
uses: actions/download-artifact@v4
with:
name: NativeLibs
path: dhfs-native-downloaded
- name: Show all the files
run: find .
# Install the cosign tool except on PR
# https://github.com/sigstore/cosign-installer
- name: Install cosign
if: github.event_name != 'pull_request'
uses: sigstore/cosign-installer@v3.5.0
with:
cosign-release: "v2.2.4"
# Set up BuildKit Docker container builder to be able to build
# multi-platform images and export cache
# https://github.com/docker/setup-buildx-action
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Login against a Docker registry except on PR
# https://github.com/docker/login-action
- name: Log into registry ${{ env.REGISTRY }}
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Extract metadata (tags, labels) for Docker
# https://github.com/docker/metadata-action
- name: Extract Docker metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
# Build and push Docker image with Buildx (don't push on PR)
# https://github.com/docker/build-push-action
- name: Build and push Docker image
id: build-and-push
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile.ci
push: ${{ github.event_name != 'pull_request' }}
platforms: linux/amd64,linux/arm64
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
# Sign the resulting Docker image digest except on PRs.
# This will only write to the public Rekor transparency log when the Docker
# repository is public to avoid leaking data. If you would like to publish
# transparency data even for private images, pass --force to cosign below.
# https://github.com/sigstore/cosign
- name: Sign the published Docker image
if: ${{ github.event_name != 'pull_request' }}
env:
# https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable
TAGS: ${{ steps.meta.outputs.tags }}
DIGEST: ${{ steps.build-and-push.outputs.digest }}
# This step uses the identity token to provision an ephemeral certificate
# against the sigstore community Fulcio instance.
run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST}
publish-run-wrapper:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
# This is used to complete the identity challenge
# with sigstore/fulcio when running outside of PRs.
id-token: write
needs: [build-webui, build-dhfs]
needs: [build-webui, merge-native-libs, build-dhfs]
steps:
- name: Checkout repository
@@ -104,6 +309,11 @@ jobs:
name: Webui
path: webui-dist-downloaded
- uses: actions/download-artifact@v4
with:
name: NativeLibs
path: dhfs-native-downloaded
- name: Show all the files
run: find .
@@ -111,18 +321,17 @@ jobs:
run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app
- name: Copy DHFS
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"
- name: Copy Webui
run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"
- name: Copy Webui
run: cp -r ./dhfs-native-downloaded "run-wrapper-out/dhfs/app/NativeLibs"
- name: Copy run wrapper
run: cp -r ./run-wrapper/* "run-wrapper-out/dhfs/app/"
- name: Copy README
run: |
cp README.md "run-wrapper-out/dhfs/"
- name: Add version to run wrapper
run: echo $GITHUB_RUN_ID > "run-wrapper-out/dhfs/app/"version
@@ -134,36 +343,3 @@ jobs:
with:
name: Run wrapper
path: ~/run-wrapper.tar.gz
publish-javadoc:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
permissions:
contents: read
pages: write
id-token: write
needs: [build-webui, build-dhfs]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: DHFS Javadocs
path: dhfs-javadocs-downloaded
- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
path: "dhfs-javadocs-downloaded"
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4

.gitmodules (vendored, 3 lines)

@@ -1,3 +0,0 @@
[submodule "thirdparty/lazyfs/lazyfs"]
path = thirdparty/lazyfs/lazyfs
url = git@github.com:dsrhaslab/lazyfs.git


@@ -1 +0,0 @@
Syncthing

Dockerfile (new file, 35 lines)

@@ -0,0 +1,35 @@
FROM node:20-bullseye as webui-build
WORKDIR /usr/src/app/webui-build
COPY ./webui/package*.json ./
RUN npm i
COPY ./webui/. .
RUN npm run build
FROM azul/zulu-openjdk:21 as server-build
WORKDIR /usr/src/app/server-build
COPY ./server/.mvn .mvn
COPY ./server/mvnw ./server/pom.xml ./
RUN ./mvnw quarkus:go-offline
# The previous thing still doesn't download 100% everything
RUN ./mvnw -Dmaven.test.skip=true -Dskip.unit=true package --fail-never
COPY ./server/. .
RUN ./mvnw -Dmaven.test.skip=true -Dskip.unit=true clean package
FROM azul/zulu-openjdk-alpine:21-jre-headless
RUN apk update && apk add fuse && rm -rf /var/cache/apk/*
WORKDIR /usr/src/app
COPY --from=server-build /usr/src/app/server-build/target/quarkus-app/. .
RUN mkdir -p webui
COPY --from=webui-build /usr/src/app/webui-build/dist/. ./webui
ENV dhfs_webui_root=/usr/src/app/webui
COPY ./dockerentry.sh .
RUN ["chmod", "+x", "./dockerentry.sh"]
CMD [ "./dockerentry.sh" ]

Dockerfile.ci (new file, 24 lines)

@@ -0,0 +1,24 @@
FROM azul/zulu-openjdk:21-jre-headless
RUN apt update && apt install -y libfuse2 && apt-get clean
WORKDIR /usr/src/app
COPY ./dhfs-package-downloaded/lib .
COPY ./dhfs-package-downloaded/*.jar .
COPY ./dhfs-package-downloaded/app .
COPY ./dhfs-package-downloaded/quarkus .
WORKDIR /usr/src/app/native-libs
COPY ./dhfs-native-downloaded/. .
WORKDIR /usr/src/app/webui
COPY ./webui-dist-downloaded/. .
ENV dhfs_webui_root=/usr/src/app/webui
WORKDIR /usr/src/app
COPY ./dockerentry.sh .
RUN ["chmod", "+x", "./dockerentry.sh"]
CMD [ "./dockerentry.sh" ]


@@ -1,6 +1,4 @@
# Distributed Home File System
[Javadocs](https://usatiuk.github.io/dhfs/)
# Distributed Home File System 🚧
## What is this?
@@ -13,71 +11,9 @@ Syncthing and allowing you to stream your files like Google Drive File Stream
[Download latest build](https://nightly.link/usatiuk/dhfs/workflows/server/main/Run%20wrapper.zip)
This is a simple set of scripts that allows you to run/stop
the DHFS server in the background, and update it.
This is a simple wrapper around the jar/web ui distribution that allows you to run/stop
the DHFS server in the background, and update itself (hopefully!)
Once unpacked, in the root folder (`dhfs`), there will be 3 folders:
## How to use it and how it works?
- `app` contains the application
- `data` contains the filesystem data storage
- `fuse` is the default filesystem mount point (not used on Windows, where the default mount drive letter is `Z`)
Note that on Windows, the path to the root cannot contain spaces.
## How to use it?
### General prerequisites
Java 21 is required; it should be available as `java` in `PATH`, or via a correctly set `JAVA_HOME` (ignored on Windows).
The FUSE 2 userspace library should also be available (an example install command is sketched below):
- On Ubuntu, the `libfuse2` package can be installed (or an analogous package on other distributions).
- On Windows, [WinFsp](https://winfsp.dev/) should be installed.
- On macOS, [macFUSE](https://macfuse.github.io/).
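For example, on an Ubuntu or Debian system the prerequisites could be installed roughly like this (a sketch only; the exact Java package name varies by distribution and release, and any JDK/JRE 21 works):

```bash
# Install a Java 21 runtime and the FUSE 2 userspace library (assumed package names)
sudo apt-get update
sudo apt-get install -y openjdk-21-jre libfuse2
```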
### How to run it?
In the run-wrapper `app` folder, 3 scripts are available.
- `run` script starts the filesystem
- `stop` script stops it
- `update` script will update the filesystem to the newest available CI build
On Windows, the PowerShell versions of the scripts should be used. For them to work, it might be necessary to allow execution of unsigned scripts using `set-executionpolicy unrestricted`.
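For instance, on Linux or macOS a typical session with these scripts might look like the following sketch (exact script file names, e.g. a `.sh` suffix, may differ):

```bash
cd dhfs/app
./run      # start the filesystem in the background
./stop     # stop it again
./update   # replace the app with the newest available CI build
```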
### Additional options
Additional options for the filesystem can be specified in the `extra-opts` file in the same directory as the run scripts.
Each line in the `extra-opts` file corresponds to one option passed to the JVM when starting the filesystem.
Some of the available configuration options are:
- `-Ddhfs.fuse.root=` specifies the root where the filesystem should be mounted. By default, it is the `fuse` path under the `run-wrapper` root. On Windows, it should be a disk root, by default `Z:\`.
- `-Ddhfs.objects.last-seen.timeout=` specifies the period of time (in seconds) after which unavailable peers will be ignored for garbage collection and resynchronized after reconnecting. The default is 43200 (30 days); if set to `-1`, this feature is disabled.
- `-Ddhfs.objects.autosync.download-all=` specifies whether all objects (files and their data) should be downloaded to this peer; `true` or `false`, the default is `false`.
- `-Ddhfs.objects.peerdiscovery.port=` port to broadcast on and listen to for LAN peer discovery (default is `42262`)
- `-Ddhfs.objects.peerdiscovery.broadcast=` whether to enable local peer discovery or not (default is `true`)
- `-Dquarkus.http.port=` HTTP port to listen on (default is `8080`)
- `-Dquarkus.http.ssl-port=` HTTPS port to listen on (default is `8443`)
- `-Dquarkus.http.host=` IP address to listen on (default is `0.0.0.0`)
- `-Ddhfs.peerdiscovery.static-peers=` allows manually specifying a peer's address in the format `peer id:http port:https port`, for example `-Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011`
On Windows, the entire space for the filesystem should also be preallocated; the `-Ddhfs.objects.persistence.lmdb.size=` option controls the size in bytes (the default on Windows is 100 GB).
In case of errors, the standard output is redirected to `quarkus.log` in the `app` folder; on Windows, the error output is written to a separate file.
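As an illustration, an `extra-opts` file overriding a few of these defaults might look like this (one JVM option per line; the values are examples only):

```
-Ddhfs.fuse.root=/home/user/dhfs-mount
-Ddhfs.objects.autosync.download-all=true
-Dquarkus.http.port=9080
-Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011
```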
### How to connect to other peers?
A web interface will then be available at `localhost:8080` (or whatever the HTTP port is) that can be used to connect to other peers. Peers on the local network should be discovered and available to connect to automatically.
## Other notes
### Running tests
To run the LazyFS tests, LazyFS needs to be built first: the git submodules need to be cloned and the `./thirdparty/lazyfs/build.sh` script needs to be run.
The LazyFS tests have only been tested on Linux.
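A minimal sequence for that, assuming a fresh clone, might be the following (the Maven invocation mirrors the CI workflow above):

```bash
git submodule update --init --recursive    # fetch the LazyFS submodule
./thirdparty/lazyfs/build.sh               # build LazyFS
cd dhfs-parent && mvn --batch-mode --update-snapshots package verify
```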
TODO 😁


@@ -41,5 +41,3 @@ nb-configuration.xml
# Plugin directory
/.quarkus/cli/plugins/
.jqwik-database


@@ -1,16 +1,17 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main"/>
<module name="server"/>
<option name="VM_PARAMETERS"
value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011"/>
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*"/>
<option name="ENABLED" value="true"/>
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true"/>
</method>
</configuration>
</component>


@@ -1,16 +1,18 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication"
nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main"/>
<module name="server"/>
<option name="VM_PARAMETERS"
value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021"/>
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*"/>
<option name="ENABLED" value="true"/>
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true"/>
</method>
</configuration>
</component>


@@ -0,0 +1,60 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap-deployment</artifactId>
<name>Autoprotomap - Deployment</name>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc-deployment</artifactId>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5-internal</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc-deployment</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<execution>
<id>default-compile</id>
<configuration>
<annotationProcessorPaths>
<path>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-processor</artifactId>
<version>${quarkus.platform.version}</version>
</path>
</annotationProcessorPaths>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>


@@ -0,0 +1,78 @@
package com.usatiuk.autoprotomap.deployment;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.arc.deployment.GeneratedBeanBuildItem;
import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.ApplicationIndexBuildItem;
import io.quarkus.gizmo.ClassCreator;
import io.quarkus.gizmo.SignatureBuilder;
import jakarta.inject.Singleton;
import org.jboss.jandex.ClassType;
import org.jboss.jandex.Type;
class AutoprotomapProcessor {
@BuildStep
ProtoIndexBuildItem index(ApplicationIndexBuildItem jandex) {
var ret = new ProtoIndexBuildItem();
var annot = jandex.getIndex().getAnnotations(ProtoMirror.class);
for (var a : annot) {
var protoTarget = jandex.getIndex().getClassByName(((ClassType) a.value().value()).name());
// if (!messageImplementors.contains(protoTarget))
// throw new IllegalArgumentException("Expected " + protoTarget + " to be a proto message");
System.out.println("Found: " + a.name().toString() + " at " + protoTarget.name().toString() + " of " + a.target().asClass().name().toString());
ret.protoMsgToObj.put(protoTarget, a.target().asClass());
}
return ret;
}
@BuildStep
void generateProtoSerializer(ApplicationIndexBuildItem jandex,
ProtoIndexBuildItem protoIndex,
BuildProducer<GeneratedBeanBuildItem> generatedClasses) {
try {
for (var o : protoIndex.protoMsgToObj.entrySet()) {
System.out.println("Generating " + o.getKey().toString() + " -> " + o.getValue().toString());
var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedClasses);
var msgType = io.quarkus.gizmo.Type.classType(o.getKey().name());
var objType = io.quarkus.gizmo.Type.classType(o.getValue().name());
var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
msgType, objType);
var msgJType = Type.create(o.getKey().name(), Type.Kind.CLASS);
var objJType = Type.create(o.getValue().name(), Type.Kind.CLASS);
try (ClassCreator classCreator = ClassCreator.builder()
.className("com.usatiuk.autoprotomap.generated.for" + o.getKey().simpleName())
.signature(SignatureBuilder.forClass().addInterface(type))
.classOutput(gizmoAdapter)
.setFinal(true)
.build()) {
classCreator.addAnnotation(Singleton.class);
var generator = new ProtoSerializerGenerator(
jandex.getIndex(),
protoIndex,
classCreator,
msgJType,
objJType
);
generator.generate();
}
}
} catch (Throwable e) {
StringBuilder sb = new StringBuilder();
sb.append(e + "\n");
for (var el : e.getStackTrace()) {
sb.append(el.toString() + "\n");
}
System.out.println(sb);
}
}
}


@@ -0,0 +1,18 @@
package com.usatiuk.autoprotomap.deployment;
public class Constants {
public static final String FIELD_PREFIX = "_";
public static String capitalize(String str) {
return str.substring(0, 1).toUpperCase() + str.substring(1);
}
public static String stripPrefix(String str, String prefix) {
if (str.startsWith(prefix)) {
return str.substring(prefix.length());
}
return str;
}
}


@@ -0,0 +1,6 @@
package com.usatiuk.autoprotomap.deployment;
@FunctionalInterface
public interface Effect {
void apply();
}


@@ -0,0 +1,10 @@
package com.usatiuk.autoprotomap.deployment;
import io.quarkus.builder.item.SimpleBuildItem;
import org.apache.commons.collections4.BidiMap;
import org.apache.commons.collections4.bidimap.DualHashBidiMap;
import org.jboss.jandex.ClassInfo;
public final class ProtoIndexBuildItem extends SimpleBuildItem {
BidiMap<ClassInfo, ClassInfo> protoMsgToObj = new DualHashBidiMap<>();
}


@@ -0,0 +1,342 @@
package com.usatiuk.autoprotomap.deployment;
import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.gizmo.*;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.jboss.jandex.*;
import org.jboss.jandex.Type;
import org.objectweb.asm.Opcodes;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.usatiuk.autoprotomap.deployment.Constants.*;
public class ProtoSerializerGenerator {
private final Index index;
private final ProtoIndexBuildItem protoIndex;
private final ClassCreator classCreator;
private final HashSet<Pair<ClassInfo, ClassInfo>> externalSerializers = new HashSet<>();
private final Type topMessageType;
private final Type topObjectType;
public ProtoSerializerGenerator(Index index, ProtoIndexBuildItem protoIndex, ClassCreator classCreator, Type topMessageType, Type topObjectType) {
this.index = index;
this.protoIndex = protoIndex;
this.classCreator = classCreator;
this.topMessageType = topMessageType;
this.topObjectType = topObjectType;
}
private FieldDescriptor getOutsideSerializer(ClassInfo messageClass, ClassInfo objectClass) {
var name = messageClass.name().withoutPackagePrefix() + objectClass.name().withoutPackagePrefix() + "serializer";
var msgType = io.quarkus.gizmo.Type.classType(messageClass.name());
var objType = io.quarkus.gizmo.Type.classType(objectClass.name());
var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
msgType, objType);
var sig = SignatureBuilder.forField().setType(type).build();
var fd = FieldDescriptor.of(classCreator.getClassName(), name, ProtoSerializer.class);
if (externalSerializers.add(Pair.of(messageClass, objectClass))) {
var fc = classCreator.getFieldCreator(fd);
fc.addAnnotation(Inject.class);
fc.setSignature(sig);
fc.setModifiers(Opcodes.ACC_PUBLIC);
}
return fd;
}
private void traverseHierarchy(Index index, ClassInfo klass, Consumer<ClassInfo> visitor) {
var cur = klass;
while (true) {
visitor.accept(cur);
var next = cur.superClassType().name();
if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break;
cur = index.getClassByName(next);
}
}
private ArrayList<FieldInfo> findAllFields(Index index, ClassInfo klass) {
ArrayList<FieldInfo> ret = new ArrayList<>();
traverseHierarchy(index, klass, cur -> {
ret.addAll(cur.fields());
});
return ret;
}
private void generateBuilderUse(BytecodeCreator bytecodeCreator,
ResultHandle builder,
Type messageType, Type objectType,
ResultHandle object) {
var builderType = Type.create(DotName.createComponentized(messageType.name(), "Builder", true), Type.Kind.CLASS);
var objectClass = index.getClassByName(objectType.name().toString());
Function<String, String> getterGetter = objectClass.isRecord()
? Function.identity()
: s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX));
for (var f : findAllFields(index, objectClass)) {
var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX);
Supplier<ResultHandle> get = () -> {
if ((f.flags() & Opcodes.ACC_PUBLIC) != 0)
return bytecodeCreator.readInstanceField(f, object);
else {
var fieldGetter = getterGetter.apply(f.name());
return bytecodeCreator.invokeVirtualMethod(
MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object);
}
};
Effect doSimpleCopy = () -> {
var setter = MethodDescriptor.ofMethod(builderType.name().toString(), "set" + capitalize(consideredFieldName),
builderType.name().toString(), f.type().toString());
var val = get.get();
bytecodeCreator.invokeVirtualMethod(setter, builder, val);
};
switch (f.type().kind()) {
case CLASS -> {
if (f.type().equals(Type.create(String.class)) || f.type().equals(Type.create(ByteString.class))) {
doSimpleCopy.apply();
} else {
var builderGetter = "get" + capitalize(f.name()) + "Builder";
var protoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(f.type().name()));
var nestedBuilderType = Type.create(DotName.createComponentized(protoType.name(), "Builder", true), Type.Kind.CLASS);
var nestedBuilder = bytecodeCreator.invokeVirtualMethod(
MethodDescriptor.ofMethod(builderType.toString(), builderGetter, nestedBuilderType.name().toString()), builder);
var val = get.get();
generateBuilderUse(bytecodeCreator, nestedBuilder, Type.create(protoType.name(), Type.Kind.CLASS), f.type(), val);
}
}
case PRIMITIVE -> {
doSimpleCopy.apply();
}
case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
case PARAMETERIZED_TYPE ->
throw new UnsupportedOperationException("Parametrized types not supported yet");
case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
default -> throw new IllegalStateException("Unexpected type: " + f.type());
}
}
}
private ResultHandle generateConstructorUse(
BytecodeCreator bytecodeCreator,
ClassCreator classCreator,
Type messageType, Type objectType,
ResultHandle message
) {
var constructor = findAllArgsConstructor(index, index.getClassByName(objectType.name()));
if (constructor == null) {
throw new IllegalStateException("No constructor found for type: " + objectType.name());
}
var argMap = new ResultHandle[constructor.parametersCount()];
for (int i = 0; i < argMap.length; i++) {
var type = constructor.parameterType(i);
var strippedName = stripPrefix(constructor.parameterName(i), FIELD_PREFIX);
IntConsumer doSimpleCopy = (arg) -> {
var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
type.name().toString());
argMap[arg] = bytecodeCreator.invokeVirtualMethod(call, message);
};
switch (type.kind()) {
case CLASS -> {
if (type.equals(Type.create(String.class)) || type.equals(Type.create(ByteString.class))) {
doSimpleCopy.accept(i);
} else {
var nestedProtoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(type.name()));
var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
nestedProtoType.name().toString());
var nested = bytecodeCreator.invokeVirtualMethod(call, message);
argMap[i] = generateConstructorUse(bytecodeCreator, classCreator, Type.create(nestedProtoType.name(), Type.Kind.CLASS), type, nested);
}
}
case PRIMITIVE -> {
doSimpleCopy.accept(i);
}
case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
case PARAMETERIZED_TYPE ->
throw new UnsupportedOperationException("Parametrized types not supported yet");
case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
default -> throw new IllegalStateException("Unexpected type: " + type);
}
}
return bytecodeCreator.newInstance(constructor, argMap);
}
private MethodInfo findAllArgsConstructor(Index index, ClassInfo klass) {
ArrayList<FieldInfo> fields = findAllFields(index, klass);
var fieldCount = fields.size();
var fieldNames = fields.stream().map(f -> stripPrefix(f.name(), FIELD_PREFIX)).sorted().toList();
var fieldNameToType = fields.stream().collect(Collectors.toMap(f -> stripPrefix(f.name(), FIELD_PREFIX), FieldInfo::type));
for (var m : klass.constructors()) {
if (m.parametersCount() != fieldCount) continue;
var parameterNames = m.parameters().stream().map(n -> stripPrefix(n.name(), FIELD_PREFIX)).sorted().toList();
if (!Objects.equals(fieldNames, parameterNames)) continue;
for (var p : m.parameters()) {
if (!Objects.equals(fieldNameToType.get(stripPrefix(p.name(), FIELD_PREFIX)), p.type())) continue;
}
return m;
}
return null;
}
public void generateAbstract() {
var kids = Stream.concat(index.getAllKnownSubclasses(topObjectType.name()).stream(),
index.getAllKnownImplementors(topObjectType.name()).stream())
.filter(k -> !k.isAbstract() && !k.isInterface()).toList();
try (MethodCreator method = classCreator.getMethodCreator("serialize",
Message.class, Object.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);
var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));
var arg = method.getMethodParam(0);
for (var nestedObjClass : kids) {
System.out.println("Generating " + nestedObjClass.name() + " serializer for " + topObjectType.name());
var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
boolean doExternalCall = false;
if (nestedMessageClass == null) {
var msgInfo = index.getClassByName(topMessageType.name());
nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
doExternalCall = true;
}
var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);
var statement = method.ifTrue(method.instanceOf(arg, nestedObjClass.name().toString()));
try (var branch = statement.trueBranch()) {
if (doExternalCall) {
var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
var serialized = branch.invokeInterfaceMethod(
MethodDescriptor.ofMethod(ProtoSerializer.class,
"serialize", Message.class, Object.class),
serializerLoaded, arg);
branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
"set" + capitalize(nestedObjType.name().withoutPackagePrefix()),
builderType.name().toString(), nestedMessageType.name().toString()), builder, serialized);
} else {
var nestedBuilderType = Type.create(DotName.createComponentized(nestedMessageType.name(), "Builder", true), Type.Kind.CLASS);
var nestedBuilder = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
"get" + capitalize(nestedObjType.name().withoutPackagePrefix()) + "Builder",
nestedBuilderType.name().toString()), builder);
generateBuilderUse(branch, nestedBuilder, nestedMessageType, nestedObjType, arg);
}
var result = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
branch.returnValue(result);
}
}
method.throwException(IllegalArgumentException.class, "Unknown object type");
}
try (MethodCreator method = classCreator.getMethodCreator("deserialize",
Object.class, Message.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var arg = method.getMethodParam(0);
for (var nestedObjClass : kids) {
System.out.println("Generating " + nestedObjClass.name() + " deserializer for " + topObjectType.name());
var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
boolean doExternalCall = false;
if (nestedMessageClass == null) {
var msgInfo = index.getClassByName(topMessageType.name());
nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
doExternalCall = true;
}
var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);
var typeCheck = method.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
"has" + capitalize(nestedObjType.name().withoutPackagePrefix()), boolean.class), arg);
var statement = method.ifTrue(typeCheck);
try (var branch = statement.trueBranch()) {
var nestedMessage = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
"get" + capitalize(nestedObjType.name().withoutPackagePrefix()), nestedMessageType.name().toString()), arg);
if (doExternalCall) {
var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
branch.returnValue(branch.invokeInterfaceMethod(
MethodDescriptor.ofMethod(ProtoSerializer.class,
"deserialize", Object.class, Message.class),
serializerLoaded, nestedMessage));
} else {
branch.returnValue(generateConstructorUse(branch, classCreator, nestedMessageType, nestedObjType, nestedMessage));
}
}
}
method.throwException(IllegalArgumentException.class, "Unknown object type");
}
}
public void generate() {
var objInfo = index.getClassByName(topObjectType.name());
if (objInfo.isAbstract() || objInfo.isInterface()) {
generateAbstract();
return;
}
try (MethodCreator method = classCreator.getMethodCreator("serialize",
Message.class, Object.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);
var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));
var arg = method.getMethodParam(0);
generateBuilderUse(method, builder, topMessageType, topObjectType, arg);
var result = method.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
method.returnValue(result);
}
try (MethodCreator method = classCreator.getMethodCreator("deserialize",
Object.class, Message.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var arg = method.getMethodParam(0);
method.returnValue(generateConstructorUse(method, classCreator, topMessageType, topObjectType, arg));
}
}
}


@@ -0,0 +1,22 @@
package com.usatiuk.autoprotomap.test;
import io.quarkus.test.QuarkusDevModeTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
public class AutoprotomapDevModeTest {
// Start hot reload (DevMode) test with your extension loaded
@RegisterExtension
static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
public void writeYourOwnDevModeTest() {
// Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information
Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName());
}
}


@@ -0,0 +1,22 @@
package com.usatiuk.autoprotomap.test;
import io.quarkus.test.QuarkusUnitTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
public class AutoprotomapTest {
// Start unit test with your extension loaded
@RegisterExtension
static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
public void writeYourOwnUnitTest() {
// Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information
Assertions.assertTrue(true, "Add some assertions to " + getClass().getName());
}
}


@@ -0,0 +1,107 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap-integration-tests</artifactId>
<name>Autoprotomap - Integration Tests</name>
<properties>
<skipITs>true</skipITs>
</properties>
<dependencies>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-deployment</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<systemPropertyVariables>
<native.image.path>${project.build.directory}/${project.build.finalName}-runner
</native.image.path>
<java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
<maven.home>${maven.home}</maven.home>
</systemPropertyVariables>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>native</id>
<activation>
<property>
<name>native</name>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>${native.surefire.skip}</skipTests>
</configuration>
</plugin>
</plugins>
</build>
<properties>
<skipITs>false</skipITs>
<quarkus.native.enabled>true</quarkus.native.enabled>
</properties>
</profile>
</profiles>
</project>


@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(AbstractProto.class)
public abstract class AbstractObject {
}


@@ -0,0 +1,10 @@
package com.usatiuk.autoprotomap.it;
import lombok.AllArgsConstructor;
import lombok.Getter;
@AllArgsConstructor
@Getter
public class CustomObject extends AbstractObject {
public int testNum = 0;
}


@@ -0,0 +1,17 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Singleton;
@Singleton
public class CustomObjectSerializer implements ProtoSerializer<CustomObjectProto, CustomObject> {
@Override
public CustomObject deserialize(CustomObjectProto message) {
return new CustomObject(2);
}
@Override
public CustomObjectProto serialize(CustomObject object) {
return CustomObjectProto.newBuilder().setTest(1).build();
}
}


@@ -0,0 +1,8 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(InterfaceObjectProto.class)
public interface InterfaceObject {
String key();
}


@@ -0,0 +1,15 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;
@ProtoMirror(NestedObjectProto.class)
@AllArgsConstructor
@Getter
public class NestedObject extends AbstractObject {
public SimpleObject object;
public String _nestedName;
public ByteString _nestedSomeBytes;
}


@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(RecordObjectProto.class)
public record RecordObject(String key) implements InterfaceObject {
}


@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(RecordObject2Proto.class)
public record RecordObject2(String key, int value) implements InterfaceObject {
}


@@ -0,0 +1,15 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;
@ProtoMirror(SimpleObjectProto.class)
@AllArgsConstructor
@Getter
public class SimpleObject extends AbstractObject {
public int numfield = 0;
private String name;
public ByteString someBytes;
}


@@ -0,0 +1,47 @@
syntax = "proto3";
option java_multiple_files = true;
option java_package = "com.usatiuk.autoprotomap.it";
option java_outer_classname = "TestProto";
package autoprotomap.test;
message SimpleObjectProto {
int32 numfield = 1;
string name = 2;
bytes someBytes = 3;
}
message NestedObjectProto {
SimpleObjectProto object = 1;
string nestedName = 2;
bytes nestedSomeBytes = 3;
}
message CustomObjectProto {
int64 test = 1;
}
message AbstractProto {
oneof obj {
NestedObjectProto nestedObject = 1;
SimpleObjectProto simpleObject = 2;
CustomObjectProto customObject = 3;
}
}
message RecordObjectProto {
string key = 1;
}
message RecordObject2Proto {
string key = 1;
int32 value = 2;
}
message InterfaceObjectProto {
oneof obj {
RecordObjectProto recordObject = 1;
RecordObject2Proto recordObject2 = 2;
}
}


@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;
import io.quarkus.test.junit.QuarkusIntegrationTest;
@QuarkusIntegrationTest
public class AutoprotomapResourceIT extends AutoprotomapResourceTest {
}


@@ -0,0 +1,113 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.test.junit.QuarkusTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
@QuarkusTest
public class AutoprotomapResourceTest {
@Inject
ProtoSerializer<SimpleObjectProto, SimpleObject> simpleProtoSerializer;
@Inject
ProtoSerializer<NestedObjectProto, NestedObject> nestedProtoSerializer;
@Inject
ProtoSerializer<AbstractProto, AbstractObject> abstractProtoSerializer;
@Inject
ProtoSerializer<InterfaceObjectProto, InterfaceObject> interfaceProtoSerializer;
@Test
public void testSimple() {
var ret = simpleProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
Assertions.assertEquals(1234, ret.getNumfield());
Assertions.assertEquals("simple test", ret.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSomeBytes());
var des = simpleProtoSerializer.deserialize(ret);
Assertions.assertEquals(1234, des.getNumfield());
Assertions.assertEquals("simple test", des.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
}
@Test
public void testNested() {
var ret = nestedProtoSerializer.serialize(
new NestedObject(
new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
"nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
Assertions.assertEquals(333, ret.getObject().getNumfield());
Assertions.assertEquals("nested so", ret.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", ret.getNestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedSomeBytes());
var des = nestedProtoSerializer.deserialize(ret);
Assertions.assertEquals(333, des.object.numfield);
Assertions.assertEquals(333, des.getObject().getNumfield());
Assertions.assertEquals("nested so", des.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", des.get_nestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
}
@Test
public void testAbstractSimple() {
var ret = abstractProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
Assertions.assertEquals(1234, ret.getSimpleObject().getNumfield());
Assertions.assertEquals("simple test", ret.getSimpleObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSimpleObject().getSomeBytes());
var des = (SimpleObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(1234, des.getNumfield());
Assertions.assertEquals("simple test", des.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
}
@Test
public void testAbstractCustom() {
var ret = abstractProtoSerializer.serialize(new CustomObject(1234));
Assertions.assertEquals(1, ret.getCustomObject().getTest());
var des = (CustomObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(2, des.getTestNum());
}
@Test
public void testAbstractNested() {
var ret = abstractProtoSerializer.serialize(
new NestedObject(
new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
"nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
Assertions.assertEquals(333, ret.getNestedObject().getObject().getNumfield());
Assertions.assertEquals("nested so", ret.getNestedObject().getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getNestedObject().getObject().getSomeBytes());
Assertions.assertEquals("nested obj", ret.getNestedObject().getNestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedObject().getNestedSomeBytes());
var des = (NestedObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(333, des.object.numfield);
Assertions.assertEquals(333, des.getObject().getNumfield());
Assertions.assertEquals("nested so", des.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", des.get_nestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
}
@Test
public void testInterface() {
var ret = interfaceProtoSerializer.serialize(new RecordObject("record test"));
Assertions.assertEquals("record test", ret.getRecordObject().getKey());
var des = (RecordObject) interfaceProtoSerializer.deserialize(ret);
Assertions.assertEquals("record test", des.key());
var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234));
Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey());
Assertions.assertEquals(1234, ret2.getRecordObject2().getValue());
var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2);
Assertions.assertEquals("record test 2", des2.key());
Assertions.assertEquals(1234, des2.value());
}
}


@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>Autoprotomap - Parent</name>
<modules>
<module>deployment</module>
<module>runtime</module>
<module>integration-tests</module>
</modules>
</project>


@@ -0,0 +1,63 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap</artifactId>
<name>Autoprotomap - Runtime</name>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>extension-descriptor</goal>
</goals>
<configuration>
<deployment>${project.groupId}:${project.artifactId}-deployment:${project.version}
</deployment>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<execution>
<id>default-compile</id>
<configuration>
<annotationProcessorPaths>
<path>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-processor</artifactId>
<version>${quarkus.platform.version}</version>
</path>
</annotationProcessorPaths>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>


@@ -0,0 +1,12 @@
package com.usatiuk.autoprotomap.runtime;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.CLASS)
@Target(ElementType.TYPE)
public @interface ProtoMirror {
Class<?> value() default Object.class;
}


@@ -1,4 +1,4 @@
package com.usatiuk.dhfs;
package com.usatiuk.autoprotomap.runtime;
import com.google.protobuf.Message;


@@ -0,0 +1,9 @@
name: Autoprotomap
#description: Do something useful.
metadata:
# keywords:
# - autoprotomap
# guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension
# categories:
# - "miscellaneous"
# status: "preview"


@@ -1,127 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
true
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,18 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
/**
* ChunkData is a data structure that represents an immutable binary blob
* @param key unique key
* @param data binary data
*/
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
@Override
public int estimateSize() {
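// The size estimate is simply the chunk payload length in bytes.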
return data.size();
}
}

View File

@@ -1,20 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
/**
* FileDto is a data transfer object that contains a file and its chunks.
* @param file the file
* @param chunks the list of chunks, each represented as a pair of a long and a JObjectKey
*/
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
@Override
public Class<? extends JDataRemote> objClass() {
return File.class;
}
}

View File

@@ -1,22 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
/**
* JKleppmannTreeNodeMetaDirectory is a record that represents a directory in the JKleppmann tree.
* @param name the name of the directory
*/
public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaDirectory(name);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of();
}
}

View File

@@ -1,24 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
/**
* JKleppmannTreeNodeMetaFile is a record that represents a file in the JKleppmann tree.
* @param name the name of the file
* @param fileIno a reference to the `File` object
*/
public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
@Override
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaFile(name, fileIno);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of(fileIno);
}
}

View File

@@ -1,13 +0,0 @@
package com.usatiuk.dhfsfs.service;
/**
* DirectoryNotEmptyException is thrown when a directory is not empty.
* This exception is used to indicate that a directory cannot be deleted
* because it contains files or subdirectories.
*/
public class DirectoryNotEmptyException extends RuntimeException {
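// fillInStackTrace is overridden to return this without capturing a stack trace, keeping this control-flow exception cheap to throw.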
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
}

View File

@@ -1,11 +0,0 @@
package com.usatiuk.dhfsfs.service;
/**
* GetattrRes is a record that represents the result of a getattr operation.
* @param mtime File modification time
* @param ctime File creation time
* @param mode File mode
* @param type File type
*/
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}

View File

@@ -1,29 +0,0 @@
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTestProfile;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
Path tempDirWithPrefix;
try {
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
} catch (IOException e) {
throw new RuntimeException(e);
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;
}
}
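
A hedged sketch of a concrete profile building on this class (the class name is illustrative; the overridden property appears in the test configuration elsewhere in this change set):

    public class NoLocalDiscoveryProfile extends TempDataProfile {
        @Override
        protected void getConfigOverrides(Map<String, String> toPut) {
            toPut.put("dhfs.local-discovery", "false");
        }
    }

A test would then opt in with @QuarkusTest and @TestProfile(NoLocalDiscoveryProfile.class).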

View File

@@ -1,40 +0,0 @@
package com.usatiuk.dhfsfs;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
@ApplicationScoped
public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
} catch (Exception ignored) {
Log.warn("Couldn't cleanup test data on init");
}
}
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}
public void purgeDirectory(File dir) {
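// Best-effort recursive cleanup: delete() results are ignored, so leftover files do not fail the run.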
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
}
}

View File

@@ -1,11 +0,0 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true

View File

@@ -1,5 +0,0 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*

View File

@@ -1,43 +0,0 @@
# Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml
# Eclipse
.project
.classpath
.settings/
bin/
# IntelliJ
.idea
*.ipr
*.iml
*.iws
# NetBeans
nb-configuration.xml
# Visual Studio Code
.vscode
.factorypath
# OSX
.DS_Store
# Vim
*.swp
*.swo
# patch
*.orig
*.rej
# Local environment
.env
# Plugin directory
/.quarkus/cli/plugins/

View File

@@ -1,2 +0,0 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl

View File

@@ -1,43 +0,0 @@
version: "3.2"
services:
dhfs1:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs1:/dhfs_root
- $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-jar /app/quarkus-run.jar"
ports:
- 8080:8080
- 8081:8443
- 5005:5005
dhfs2:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs2:/dhfs_root
- $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
--add-exports java.base/jdk.internal.access=ALL-UNNAMED
--add-opens=java.base/java.nio=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-jar /app/quarkus-run.jar"
ports:
- 8090:8080
- 8091:8443
- 5010:5010
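
Assuming the application has already been packaged into target/quarkus-app (the bind mounts above expect it), this two-node setup would typically be brought up with something like:

    mvn package
    docker compose up --build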

View File

@@ -1,143 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fuse</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.serceman</groupId>
<artifactId>jnr-fuse</artifactId>
<version>0.5.8</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,97 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
#   "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
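
Following the header comment above, a debug-enabled run of this image would look roughly like this (image tag and ports as in the examples above):

    docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
    docker run -i --rm -p 8080:8080 -p 5005:5005 -e JAVA_DEBUG=true -e JAVA_DEBUG_PORT='*:5005' quarkus/server-jvm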

View File

@@ -1,93 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
#   "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

View File

@@ -1,27 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -1,30 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -1,24 +0,0 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.objects.autosync.threads=8
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=8
dhfs.objects.ref-processor.threads=8
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
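
These defaults can be overridden per instance with -D system properties, as the compose file earlier in this change set already does; a minimal sketch (paths are placeholders):

    java -Ddhfs.fuse.root=/mnt/dhfs/fuse -Ddhfs.local-discovery=false -jar quarkus-run.jar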

View File

@@ -1,29 +0,0 @@
package com.usatiuk.dhfsfuse;
import io.quarkus.test.junit.QuarkusTestProfile;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
Path tempDirWithPrefix;
try {
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
} catch (IOException e) {
throw new RuntimeException(e);
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;
}
}

View File

@@ -1,40 +0,0 @@
package com.usatiuk.dhfsfuse;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
@ApplicationScoped
public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
} catch (Exception ignored) {
Log.warn("Couldn't cleanup test data on init");
}
}
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}
public static void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
}
}

View File

@@ -1,238 +0,0 @@
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
public class KillIT {
GenericContainer<?> container1;
GenericContainer<?> container2;
WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;
String c1uuid;
String c2uuid;
File data1;
File data2;
Network network;
ExecutorService executor;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("").toFile();
data2 = Files.createTempDirectory("").toFile();
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data2);
executor.close();
network.close();
}
private void checkConsistency() {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
@Test
void killTest(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTestDirs(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTest2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTestDirs2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
}

View File

@@ -1,215 +0,0 @@
package com.usatiuk.dhfsfuse.integration;
import io.quarkus.logging.Log;
import java.io.*;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
public class LazyFs {
private static final String lazyFsPath;
static {
lazyFsPath = System.getProperty("lazyFsPath");
System.out.println("LazyFs Path: " + lazyFsPath);
}
private final String mountRoot;
private final String dataRoot;
private final String name;
private final File configFile;
private final File fifoFile;
private Thread errPiper;
private Thread outPiper;
private CountDownLatch startLatch;
private Process fs;
public LazyFs(String name, String mountRoot, String dataRoot) {
this.name = name;
this.mountRoot = mountRoot;
this.dataRoot = dataRoot;
try {
configFile = File.createTempFile("lazyfs", ".conf");
configFile.deleteOnExit();
fifoFile = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
fifoFile.deleteOnExit();
} catch (IOException e) {
throw new RuntimeException(e);
}
Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
}
private String fifoPath() {
return fifoFile.getAbsolutePath();
}
public void start(String extraOpts) {
var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
if (!lfsPath.toFile().isFile())
throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
if (!lfsPath.toFile().canExecute())
throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());
try (var rwFile = new RandomAccessFile(configFile, "rw");
var channel = rwFile.getChannel()) {
channel.truncate(0);
var config = "[faults]\n" +
"fifo_path=\"" + fifoPath() + "\"\n" +
"[cache]\n" +
"apply_eviction=false\n" +
"[cache.simple]\n" +
"custom_size=\"1gb\"\n" +
"blocks_per_page=1\n" +
"[filesystem]\n" +
"log_all_operations=false\n" +
"logfile=\"\"\n" + extraOpts;
rwFile.write(config.getBytes());
Log.info("LazyFs config: \n" + config);
} catch (Exception e) {
throw new RuntimeException(e);
}
var argList = new ArrayList<String>();
argList.add(lfsPath.toString());
argList.add(Path.of(mountRoot).toString());
argList.add("--config-path");
argList.add(configFile.getAbsolutePath());
argList.add("-o");
argList.add("allow_other");
argList.add("-o");
argList.add("modules=subdir");
argList.add("-o");
argList.add("subdir=" + Path.of(dataRoot).toAbsolutePath().toString());
try {
Log.info("Starting LazyFs " + argList);
fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
} catch (Exception e) {
throw new RuntimeException(e);
}
startLatch = new CountDownLatch(1);
outPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
String line;
while ((line = input.readLine()) != null) {
if (line.contains("running LazyFS"))
startLatch.countDown();
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs out piper finished");
});
outPiper.start();
errPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
String line;
while ((line = input.readLine()) != null) {
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs err piper finished");
});
errPiper.start();
try {
if (!startLatch.await(30, TimeUnit.SECONDS))
throw new RuntimeException("StartLatch timed out");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
Log.info("LazyFs started");
}
public void start() {
start("");
}
private String mdbPath() {
return Path.of(dataRoot).resolve("objects").resolve("data.mdb").toAbsolutePath().toString();
}
public void startTornOp() {
start("\n" +
"[[injection]]\n" +
"type=\"torn-seq\"\n" +
"op=\"write\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"persist=[1,4]\n" +
"occurrence=3");
}
public void startTornSeq() {
start("[[injection]]\n" +
"type=\"torn-op\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"occurrence=3\n" +
"parts=3 #or parts_bytes=[4096,3600,1260]\n" +
"persist=[1,3]");
}
public void crash() {
try {
var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
Log.info("Running command: " + cmd);
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd}).waitFor();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void stop() {
try {
synchronized (this) {
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + mountRoot}).waitFor();
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Doesn't actually work?
//
// public void crashop() {
// try {
// var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
//
// public void crashseq() {
// try {
// var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
}

View File

@@ -1,490 +0,0 @@
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
public class LazyFsIT {
GenericContainer<?> container1;
GenericContainer<?> container2;
WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;
String c1uuid;
String c2uuid;
File data1;
File data2;
File data1Lazy;
File data2Lazy;
LazyFs lazyFs1;
LazyFs lazyFs2;
ExecutorService executor;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("dhfsdata").toFile();
data2 = Files.createTempDirectory("dhfsdata").toFile();
data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
data2Lazy = Files.createTempDirectory("lazyfsroot").toFile();
network = Network.newNetwork();
lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
lazyFs1.start();
lazyFs2 = new LazyFs(testInfo.getDisplayName(), data2.toString(), data2Lazy.toString());
lazyFs2.start();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
lazyFs1.stop();
lazyFs2.stop();
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data1Lazy);
TestDataCleaner.purgeDirectory(data2);
TestDataCleaner.purgeDirectory(data2Lazy);
executor.close();
network.close();
}
private void checkConsistency(String testName) {
await().atMost(120, TimeUnit.SECONDS).until(() -> {
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info("Listing consistency " + testName + "\n"
+ ls1 + "\n"
+ cat1 + "\n"
+ ls2 + "\n"
+ cat2 + "\n");
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Log.info("Killing");
Thread.sleep(3000);
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Thread.sleep(3000);
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
private enum CrashType {
CRASH,
TORN_OP,
TORN_SEQ
}
}
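The kill tests above all follow the same shape: start a background writer, crash LazyFS under one peer, kill and restart that peer's container, wait for the peers to reconnect, then verify that the mounts converged via checkConsistency, which is part of this test class but not shown in this excerpt. A hedged sketch of what such a check could look like (hypothetical helper; assumes Awaitility and JUnit 5 Assertions on the test classpath and the /dhfs_test/fuse mount path used above):

// Hypothetical sketch, not the actual checkConsistency implementation.
private void checkConsistencySketch(String testName) {
    org.awaitility.Awaitility.await().atMost(java.time.Duration.ofMinutes(2)).untilAsserted(() -> {
        var left = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse && cat /dhfs_test/fuse/*");
        var right = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse && cat /dhfs_test/fuse/*");
        Log.info("Consistency check " + testName + ":\n" + left.getStdout() + "\n" + right.getStdout());
        org.junit.jupiter.api.Assertions.assertEquals(left.getStdout(), right.getStdout());
    });
}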

View File

@@ -1,11 +0,0 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true

View File

@@ -1,8 +1,5 @@
package com.usatiuk.kleppmanntree;
/**
* Exception thrown when an attempt is made to create a new tree node as a child with a name that already exists.
*/
public class AlreadyExistsException extends RuntimeException {
public AlreadyExistsException(String message) {
super(message);

View File

@@ -0,0 +1,32 @@
package com.usatiuk.kleppmanntree;
import java.io.Serializable;
public class AtomicClock implements Clock<Long>, Serializable {
private long _max = 0;
public AtomicClock(long counter) {
_max = counter;
}
@Override
public Long getTimestamp() {
return ++_max;
}
public void setTimestamp(Long timestamp) {
_max = timestamp;
}
@Override
public Long peekTimestamp() {
return _max;
}
@Override
public Long updateTimestamp(Long receivedTimestamp) {
var old = _max;
_max = Math.max(_max, receivedTimestamp) + 1;
return old;
}
}

View File

@@ -1,26 +1,9 @@
package com.usatiuk.kleppmanntree;
/**
* Clock interface
*/
public interface Clock<TimestampT extends Comparable<TimestampT>> {
/**
* Increment and get the current timestamp.
* @return the incremented timestamp
*/
TimestampT getTimestamp();
/**
* Get the current timestamp without incrementing it.
* @return the current timestamp
*/
TimestampT peekTimestamp();
/**
* Update the timestamp with an externally received timestamp.
* Will set the currently stored timestamp to <code>max(receivedTimestamp, currentTimestamp) + 1</code>
* @param receivedTimestamp the received timestamp
* @return the previous timestamp
*/
TimestampT updateTimestamp(TimestampT receivedTimestamp);
}
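A quick illustration of the clock contract documented above, using the AtomicClock shown earlier: getTimestamp() increments, peekTimestamp() is read-only, and updateTimestamp() advances the stored value to max(received, current) + 1.

// Minimal example, assuming AtomicClock is on the classpath.
public class ClockExample {
    public static void main(String[] args) {
        AtomicClock clock = new AtomicClock(0);
        System.out.println(clock.getTimestamp());  // 1  (increments and returns)
        System.out.println(clock.peekTimestamp()); // 1  (no increment)
        clock.updateTimestamp(10L);                // stored becomes max(1, 10) + 1 = 11
        System.out.println(clock.peekTimestamp()); // 11
        clock.updateTimestamp(5L);                 // stored becomes max(11, 5) + 1 = 12
        System.out.println(clock.peekTimestamp()); // 12
    }
}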

View File

@@ -3,13 +3,6 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
import java.util.Comparator;
/**
* CombinedTimestamp is a record that represents a timestamp and a node ID, ordered first by timestamp and then by node ID.
* @param timestamp the timestamp
* @param nodeId the node ID. If null, then only the timestamp is used for ordering.
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the node ID
*/
public record CombinedTimestamp<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>>
(TimestampT timestamp,
PeerIdT nodeId) implements Comparable<CombinedTimestamp<TimestampT, PeerIdT>>, Serializable {

View File

@@ -1,5 +1,7 @@
package com.usatiuk.kleppmanntree;
import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;
import org.apache.commons.lang3.tuple.Pair;
import java.util.*;
@@ -8,14 +10,6 @@ import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* An implementation of a tree as described in <a href="https://martin.kleppmann.com/papers/move-op.pdf">A highly-available move operation for replicated trees</a>
*
* @param <TimestampT> Type of the timestamp
* @param <PeerIdT> Type of the peer ID
* @param <MetaT> Type of the node metadata
* @param <NodeIdT> Type of the node ID
*/
public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
private static final Logger LOGGER = Logger.getLogger(KleppmannTree.class.getName());
@@ -23,15 +17,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
private final PeerInterface<PeerIdT> _peers;
private final Clock<TimestampT> _clock;
private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;
/**
* Constructor with all the dependencies
*
* @param storage Storage interface
* @param peers Peer interface
* @param clock Clock interface
* @param opRecorder Operation recorder interface
*/
public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
PeerInterface<PeerIdT> peers,
Clock<TimestampT> clock,
@@ -42,13 +29,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
_opRecorder = opRecorder;
}
/**
* Traverse the tree from the given node ID using the given list of names
*
* @param fromId The starting node ID
* @param names The list of names to traverse
* @return The resulting node ID or null if not found
*/
private NodeIdT traverseImpl(NodeIdT fromId, List<String> names) {
if (names.isEmpty()) return fromId;
@@ -62,21 +42,14 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return traverseImpl(childId, names.subList(1, names.size()));
}
/**
* Traverse the tree from the given node ID using the given list of names
*
* @param fromId The starting node ID
* @param names The list of names to traverse (note: this overload skips the first entry)
* @return The resulting node ID or null if not found
*/
public NodeIdT traverse(NodeIdT fromId, List<String> names) {
return traverseImpl(fromId, names.subList(1, names.size()));
}
public NodeIdT traverse(List<String> names) {
return traverseImpl(_storage.getRootId(), names);
}
/**
* Undo the effect of a log effect
*
* @param effect The log effect to undo
*/
private void undoEffect(LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT> effect) {
if (effect.oldInfo() != null) {
var node = _storage.getById(effect.childId());
@@ -116,14 +89,10 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
node.withParent(null)
.withLastEffectiveOp(null)
);
_undoCtx.put(node.key(), node);
}
}
/**
* Undo the effects of a log record
*
* @param op The log record to undo
*/
private void undoOp(LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
LOGGER.finer(() -> "Will undo op: " + op);
if (op.effects() != null)
@@ -131,32 +100,16 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
undoEffect(e);
}
/**
* Redo the operation in a log record
*
* @param entry The log record to redo
*/
private void redoOp(Map.Entry<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> entry) {
var newEffects = doOp(entry.getValue().op(), false);
_storage.getLog().replace(entry.getKey(), newEffects);
}
/**
* Perform the operation and put it in the log
*
* @param op The operation to perform
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
private void doAndPut(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
var res = doOp(op, failCreatingIfExists);
_storage.getLog().put(res.op().timestamp(), res);
}
/**
* Try to trim the log to the causality threshold
*/
private void tryTrimLog() {
var log = _storage.getLog();
var timeLog = _storage.getPeerTimestampLog();
@@ -212,52 +165,22 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
}
/**
* Move a node to a new parent with new metadata
*
* @param newParent The new parent node ID
* @param newMeta The new metadata
* @param child The child node ID
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
public <LocalMetaT extends MetaT> void move(NodeIdT newParent, LocalMetaT newMeta, NodeIdT child) {
move(newParent, newMeta, child, true);
}
/**
* Move a node to a new parent with new metadata
*
* @param newParent The new parent node ID
* @param newMeta The new metadata
* @param child The child node ID
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
public void move(NodeIdT newParent, MetaT newMeta, NodeIdT child, boolean failCreatingIfExists) {
var createdMove = createMove(newParent, newMeta, child);
applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists);
_opRecorder.recordOp(createdMove);
}
/**
* Apply an external operation from a remote peer
*
* @param from The peer ID
* @param op The operation to apply
*/
public void applyExternalOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
_clock.updateTimestamp(op.timestamp().timestamp());
applyOp(from, op, false);
}
/**
* Update the causality threshold timestamp for a peer
*
* @param from The peer ID
* @param newTimestamp The timestamp received from it
* @return True if the timestamp was updated, false otherwise
*/
// Returns true if the timestamp is newer than what's seen, false otherwise
private boolean updateTimestampImpl(PeerIdT from, TimestampT newTimestamp) {
TimestampT oldRef = _storage.getPeerTimestampLog().getForPeer(from);
if (oldRef != null && oldRef.compareTo(newTimestamp) >= 0) { // FIXME?
@@ -268,12 +191,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return true;
}
/**
* Update the causality threshold timestamp for a peer
*
* @param from The peer ID
* @param timestamp The timestamp received from it
*/
public void updateExternalTimestamp(PeerIdT from, TimestampT timestamp) {
var gotExt = _storage.getPeerTimestampLog().getForPeer(from);
var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId());
@@ -284,15 +201,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
tryTrimLog();
}
/**
* Apply an operation from a peer
*
* @param from The peer ID
* @param op The operation to apply
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
private void applyOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
if (!updateTimestampImpl(op.timestamp().nodeId(), op.timestamp().timestamp())) return;
@@ -309,52 +217,45 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
assert cmp != 0;
if (cmp < 0) {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
try {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
_undoCtx = new HashMap<>();
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
}
try {
doAndPut(op, failCreatingIfExists);
} finally {
for (var entry : toUndo) {
redoOp(entry);
}
if (!_undoCtx.isEmpty()) {
for (var e : _undoCtx.entrySet()) {
LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
_storage.removeNode(e.getKey());
}
}
_undoCtx = null;
}
} finally {
tryTrimLog();
}
doAndPut(op, failCreatingIfExists);
for (var entry : toUndo) {
redoOp(entry);
}
tryTrimLog();
} else {
doAndPut(op, failCreatingIfExists);
tryTrimLog();
}
}
/**
* Get a new timestamp, incrementing the one in storage
*
* @return A new timestamp
*/
private CombinedTimestamp<TimestampT, PeerIdT> getTimestamp() {
return new CombinedTimestamp<>(_clock.getTimestamp(), _peers.getSelfId());
}
/**
* Create a new move operation
*
* @param newParent The new parent node ID
* @param newMeta The new metadata
* @param node The child node ID
* @return A new move operation
*/
private <LocalMetaT extends MetaT> OpMove<TimestampT, PeerIdT, LocalMetaT, NodeIdT> createMove(NodeIdT newParent, LocalMetaT newMeta, NodeIdT node) {
return new OpMove<>(getTimestamp(), newParent, newMeta, node);
}
/**
* Perform the operation and return the log record
*
* @param op The operation to perform
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @return The log record
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> doOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
LOGGER.finer(() -> "Doing op: " + op);
LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computed;
@@ -363,7 +264,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
} catch (AlreadyExistsException aex) {
throw aex;
} catch (Exception e) {
throw new RuntimeException("Error computing effects for op " + op.toString(), e);
LOGGER.log(Level.SEVERE, "Error computing effects for op" + op.toString(), e);
computed = new LogRecord<>(op, null);
}
if (computed.effects() != null)
@@ -371,24 +273,28 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return computed;
}
/**
* Get a new node from storage
*
* @param key The node ID
* @param parent The parent node ID
* @param meta The metadata
* @return A new tree node
*/
private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
if (_undoCtx != null) {
var node = _undoCtx.get(key);
if (node != null) {
try {
if (!node.children().isEmpty()) {
LOGGER.log(Level.WARNING, "Not empty children for undone node " + key);
}
node = node.withParent(parent).withMeta(meta);
} catch (Exception e) {
LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
node = null;
}
}
if (node != null) {
_undoCtx.remove(key);
return node;
}
}
return _storage.createNewNode(key, parent, meta);
}
/**
* Apply the effects of a log record
*
* @param sourceOp The source operation
* @param effects The list of log effects
*/
private void applyEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> sourceOp, List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {
for (var effect : effects) {
LOGGER.finer(() -> "Applying effect: " + effect + " from op " + sourceOp);
@@ -429,15 +335,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
}
/**
* Compute the effects of a move operation
*
* @param op The operation to process
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @return The log record with the computed effects
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computeEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
var node = _storage.getById(op.childId());
@@ -475,6 +372,10 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
var conflictNode = _storage.getById(conflictNodeId);
MetaT conflictNodeMeta = conflictNode.meta();
if (Objects.equals(conflictNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}
LOGGER.finer(() -> "Node creation conflict: " + conflictNode);
String newConflictNodeName = op.newName() + ".conflict." + conflictNode.key();
@@ -499,14 +400,18 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
if (oldMeta != null
&& op.newMeta() != null
&& !oldMeta.getClass().equals(op.newMeta().getClass())) {
throw new RuntimeException("Class mismatch for meta for node " + node.key());
LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
return new LogRecord<>(op, null);
}
var replaceNodeId = newParent.children().get(op.newName());
if (replaceNodeId != null) {
var replaceNode = _storage.getById(replaceNodeId);
var replaceNodeMeta = replaceNode.meta();
if (Objects.equals(replaceNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}
LOGGER.finer(() -> "Node replacement: " + replaceNode);
return new LogRecord<>(op, List.of(
@@ -521,13 +426,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
));
}
/**
* Check if a node is an ancestor of another node
*
* @param child The child node ID
* @param parent The parent node ID
* @return True if the child is an ancestor of the parent, false otherwise
*/
private boolean isAncestor(NodeIdT child, NodeIdT parent) {
var node = _storage.getById(parent);
NodeIdT curParent;
@@ -538,11 +436,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return false;
}
/**
* Walk the tree and apply the given consumer to each node
*
* @param consumer The consumer to apply to each node
*/
public void walkTree(Consumer<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> consumer) {
ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
queue.push(_storage.getRootId());
@@ -556,12 +449,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
}
/**
* Find the parent of a node that matches the given predicate
*
* @param kidPredicate The predicate to match the child node
* @return A pair containing the name of the child and the ID of the parent, or null if not found
*/
public Pair<String, NodeIdT> findParent(Function<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>, Boolean> kidPredicate) {
ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
queue.push(_storage.getRootId());
@@ -582,13 +469,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return null;
}
/**
* Record the bootstrap operations for a given peer.
* Will visit all nodes of the tree and add their effective operations to both the queue to be sent to the peer,
* and to the global operation log.
*
* @param host The peer ID
*/
public void recordBoostrapFor(PeerIdT host) {
TreeMap<CombinedTimestamp<TimestampT, PeerIdT>, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT>> result = new TreeMap<>();
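To tie the API documented in this file together, a hedged usage sketch follows. The generic parameter choices (Long timestamps, UUID peer IDs, String node IDs) and the injected collaborators are placeholders, not taken from this diff.

import java.util.List;
import java.util.UUID;

// Usage sketch only; the concrete StorageInterface/PeerInterface/Clock/OpRecorder
// instances are assumed to exist elsewhere.
class KleppmannTreeUsageSketch {
    <M extends NodeMeta> void example(StorageInterface<Long, UUID, M, String> storage,
                                      PeerInterface<UUID> peers,
                                      Clock<Long> clock,
                                      OpRecorder<Long, UUID, M, String> opRecorder,
                                      M someMeta,
                                      UUID remotePeerId,
                                      OpMove<Long, UUID, M, String> receivedOp) {
        var tree = new KleppmannTree<>(storage, peers, clock, opRecorder);

        // Create (or move) a node with the given metadata under the root;
        // per the Javadoc above this may throw AlreadyExistsException on a name conflict.
        tree.move(storage.getRootId(), someMeta, storage.getNewNodeId());

        // Apply an operation received from a remote peer; this also advances the local clock.
        tree.applyExternalOp(remotePeerId, receivedOp);

        // Resolve a path from the root to a node ID (null if not found).
        String found = tree.traverse(List.of("docs"));
    }
}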

View File

@@ -2,18 +2,6 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
/**
* LogEffect is a record that represents the effect of a log entry on a tree node.
* @param oldInfo the old information about the node, before it was moved. Null if the node did not exist before
* @param effectiveOp the operation that had caused this effect to be applied
* @param newParentId the ID of the new parent node
* @param newMeta the new metadata of the node
* @param childId the ID of the child node
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>(
LogEffectOld<TimestampT, PeerIdT, MetaT, NodeIdT> oldInfo,
OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> effectiveOp,
@@ -22,14 +10,14 @@ public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT exten
NodeIdT childId) implements Serializable {
public String oldName() {
if (oldInfo.oldMeta() != null) {
return oldInfo.oldMeta().name();
return oldInfo.oldMeta().getName();
}
return childId.toString();
}
public String newName() {
if (newMeta != null) {
return newMeta.name();
return newMeta.getName();
}
return childId.toString();
}

View File

@@ -2,16 +2,6 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
/**
* Represents the old information about a node before it was moved.
* @param oldEffectiveMove the old effective move that had caused this effect to be applied
* @param oldParent the ID of the old parent node
* @param oldMeta the old metadata of the node
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public record LogEffectOld<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> oldEffectiveMove,
NodeIdT oldParent,

View File

@@ -4,82 +4,29 @@ import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
/**
* LogInterface is an interface that allows accessing the log of operations
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public interface LogInterface<
TimestampT extends Comparable<TimestampT>,
PeerIdT extends Comparable<PeerIdT>,
MetaT extends NodeMeta,
NodeIdT> {
/**
* Peek the oldest log entry.
* @return the oldest log entry
*/
Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekOldest();
/**
* Take the oldest log entry.
* @return the oldest log entry
*/
Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> takeOldest();
/**
* Peek the newest log entry.
* @return the newest log entry
*/
Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekNewest();
/**
* Return all log entries that are newer than the given timestamp.
* @param since the timestamp to compare with
* @param inclusive if true, include the log entry with the given timestamp
* @return a list of log entries that are newer than the given timestamp
*/
List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>>
newestSlice(CombinedTimestamp<TimestampT, PeerIdT> since, boolean inclusive);
/**
* Return all the log entries
* @return a list of all log entries
*/
List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>> getAll();
/**
* Checks if the log is empty.
* @return true if the log is empty, false otherwise
*/
boolean isEmpty();
/**
* Checks if the log contains the given timestamp.
* @param timestamp the timestamp to check
* @return true if the log contains the given timestamp, false otherwise
*/
boolean containsKey(CombinedTimestamp<TimestampT, PeerIdT> timestamp);
/**
* Get the size of the log.
* @return the size of the log (number of entries)
*/
long size();
/**
* Add a log entry to the log.
* @param timestamp the timestamp of the log entry
* @param record the log entry
* @throws IllegalStateException if the log entry already exists
*/
void put(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record);
/**
* Replace a log entry in the log.
* @param timestamp the timestamp of the log entry
* @param record the log entry
*/
void replace(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record);
}

View File

@@ -3,15 +3,6 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
import java.util.List;
/**
* Represents a log record in the Kleppmann tree.
* @param op the operation that is stored in this log record
* @param effects the effects of the operation (resulting moves)
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public record LogRecord<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op,
List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) implements Serializable {

View File

@@ -2,24 +2,8 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
/**
* Represents metadata associated with a node in the Kleppmann tree.
* This interface is used to define the metadata that can be associated with nodes in the tree.
* Implementations of this interface should provide a name for the node and a method to create a copy of it with a new name.
*/
public interface NodeMeta extends Serializable {
/**
* Returns the name of the node.
*
* @return the name of the node
*/
String name();
String getName();
/**
* Creates a copy of the metadata with a new name.
*
* @param name the new name for the metadata
* @return a new instance of NodeMeta with the specified name
*/
NodeMeta withName(String name);
}
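A minimal NodeMeta implementation could look like the sketch below; FileMeta is a hypothetical name, and it follows the getName()/withName() variant shown in this diff.

// Hypothetical example implementation, not part of this diff.
public record FileMeta(String name) implements NodeMeta {
    @Override
    public String getName() {
        return name;
    }

    @Override
    public NodeMeta withName(String name) {
        return new FileMeta(name);
    }
}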

View File

@@ -2,30 +2,12 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
/**
* Operation that moves a child node to a new parent node.
*
* @param timestamp the timestamp of the operation
* @param newParentId the ID of the new parent node
* @param newMeta the new metadata of the node, can be null
* @param childId the ID of the child node (the node that is being moved)
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(CombinedTimestamp<TimestampT, PeerIdT> timestamp, NodeIdT newParentId, MetaT newMeta,
NodeIdT childId) implements Serializable {
/**
* Returns the new name of the node: name extracted from the new metadata if available,
* otherwise the child ID converted to string.
*
* @return the new name of the node
*/
public String newName() {
if (newMeta != null)
return newMeta.name();
return newMeta.getName();
return childId.toString();
}
}

View File

@@ -1,26 +1,7 @@
package com.usatiuk.kleppmanntree;
/**
* Interface to provide recording operations to be sent to peers asynchronously.
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public interface OpRecorder<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
/**
* Records an operation to be sent to peers asynchronously.
* The operation will be sent to all known peers in the system.
*
* @param op the operation to be recorded
*/
void recordOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op);
/**
* Records an operation to be sent to a specific peer asynchronously.
*
* @param peer the ID of the peer to send the operation to
* @param op the operation to be recorded
*/
void recordOpForPeer(PeerIdT peer, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op);
}

View File

@@ -2,22 +2,8 @@ package com.usatiuk.kleppmanntree;
import java.util.Collection;
/**
* Interface providing access to a list of known peers.
* @param <PeerIdT> the type of the peer ID
*/
public interface PeerInterface<PeerIdT extends Comparable<PeerIdT>> {
/**
* Returns the ID of the current peer.
*
* @return the ID of the current peer
*/
PeerIdT getSelfId();
/**
* Returns a collection of all known peers.
*
* @return a collection of all known peers
*/
Collection<PeerIdT> getAllPeers();
}

View File

@@ -1,26 +1,11 @@
package com.usatiuk.kleppmanntree;
/**
* Interface providing a map of the newest received timestamps for each peer (causality thresholds).
* If a peer has some timestamp recorded in this map,
* it means that all messages coming from this peer will have a newer timestamp.
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
*/
public interface PeerTimestampLogInterface<
TimestampT extends Comparable<TimestampT>,
PeerIdT extends Comparable<PeerIdT>> {
/**
* Get the timestamp for a specific peer.
* @param peerId the ID of the peer
* @return the timestamp for the peer
*/
TimestampT getForPeer(PeerIdT peerId);
/**
* Record the newest known timestamp for a specific peer.
* @param peerId the ID of the peer
* @param timestamp the timestamp to record
*/
void putForPeer(PeerIdT peerId, TimestampT timestamp);
}
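A trivial in-memory implementation of this causality-threshold map might look like the following sketch (hypothetical class, for illustration only).

import java.util.HashMap;
import java.util.Map;

// Illustrative only; a real implementation would need to be persisted with the tree state.
public class MapPeerTimestampLog<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>>
        implements PeerTimestampLogInterface<TimestampT, PeerIdT> {
    private final Map<PeerIdT, TimestampT> _timestamps = new HashMap<>();

    @Override
    public TimestampT getForPeer(PeerIdT peerId) {
        return _timestamps.get(peerId);
    }

    @Override
    public void putForPeer(PeerIdT peerId, TimestampT timestamp) {
        _timestamps.put(peerId, timestamp);
    }
}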

View File

@@ -1,89 +1,28 @@
package com.usatiuk.kleppmanntree;
/**
* Storage interface for the Kleppmann tree.
*
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public interface StorageInterface<
TimestampT extends Comparable<TimestampT>,
PeerIdT extends Comparable<PeerIdT>,
MetaT extends NodeMeta,
NodeIdT> {
/**
* Get the root node ID.
*
* @return the root node ID
*/
NodeIdT getRootId();
/**
* Get the trash node ID.
*
* @return the trash node ID
*/
NodeIdT getTrashId();
/**
* Get the lost and found node ID.
*
* @return the lost and found node ID
*/
NodeIdT getLostFoundId();
/**
* Get the new node ID.
*
* @return the new node ID
*/
NodeIdT getNewNodeId();
/**
* Get the node by its ID.
*
* @param id the ID of the node
* @return the node with the specified ID, or null if not found
*/
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getById(NodeIdT id);
/**
* Create a new node with the specified key, parent, and metadata.
*
* @param key the ID of the new node
* @param parent the ID of the parent node
* @param meta the metadata of the new node
* @return the new node
*/
// Creates a node, returned wrapper is RW-locked
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> createNewNode(NodeIdT key, NodeIdT parent, MetaT meta);
/**
* Put a node into the storage.
*
* @param node the node to put into the storage
*/
void putNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node);
/**
* Remove a node from the storage.
*
* @param id the ID of the node to remove
*/
void removeNode(NodeIdT id);
/**
* Get the log interface.
*
* @return the log interface
*/
LogInterface<TimestampT, PeerIdT, MetaT, NodeIdT> getLog();
/**
* Get the peer timestamp log interface.
*
* @return the peer timestamp log interface
*/
PeerTimestampLogInterface<TimestampT, PeerIdT> getPeerTimestampLog();
}

View File

@@ -5,92 +5,29 @@ import org.pcollections.PMap;
import java.io.Serializable;
/**
* Represents a node in the Kleppmann tree.
*
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> extends Serializable {
/**
* Get the ID of the node.
*
* @return the ID of the node
*/
NodeIdT key();
/**
* Get the ID of the parent node.
*
* @return the ID of the parent node
*/
NodeIdT parent();
/**
* Get the last effective operation that moved this node.
*
* @return the last effective operation
*/
OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp();
/**
* Get the metadata stored in this node.
*
* @return the metadata of the node
*/
@Nullable
MetaT meta();
/**
* Get the name of the node.
* If the node has metadata, the name is extracted from it, otherwise the key is converted to string.
*
* @return the name of the node
*/
default String name() {
var meta = meta();
if (meta != null) return meta.name();
if (meta != null) return meta.getName();
return key().toString();
}
/**
* Get the children of this node.
*
* @return a map of child IDs to their respective nodes
*/
PMap<String, NodeIdT> children();
/**
* Make a copy of this node with a new parent.
*
* @param parent the ID of the new parent node
* @return a new TreeNode instance with the updated parent
*/
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withParent(NodeIdT parent);
/**
* Make a copy of this node with a new last effective operation.
*
* @param lastEffectiveOp the new last effective operation
* @return a new TreeNode instance with the updated last effective operation
*/
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withLastEffectiveOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp);
/**
* Make a copy of this node with new metadata.
*
* @param meta the new metadata
* @return a new TreeNode instance with the updated metadata
*/
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withMeta(MetaT meta);
/**
* Make a copy of this node with new children.
*
* @param children the new children
* @return a new TreeNode instance with the updated children
*/
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withChildren(PMap<String, NodeIdT> children);
}

View File

@@ -8,7 +8,7 @@ public abstract class TestNodeMeta implements NodeMeta {
}
@Override
public String name() {
public String getName() {
return _name;
}

View File

@@ -18,11 +18,6 @@
</properties>
<dependencies>
<dependency>
<groupId>net.jqwik</groupId>
<artifactId>jqwik</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
@@ -36,6 +31,10 @@
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>net.openhft</groupId>
<artifactId>zero-allocation-hashing</artifactId>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
@@ -55,6 +54,11 @@
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>supportlib</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5-mockito</artifactId>
@@ -84,11 +88,6 @@
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
@@ -100,6 +99,7 @@
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>

View File

@@ -2,20 +2,14 @@ package com.usatiuk.objects;
import java.io.Serializable;
/**
* JData is a marker interface for all objects that can be stored in the object store.
*/
// TODO: This could be maybe moved to a separate module?
// The base class for JObject data
// Only one instance of this "exists" per key; the instance in the manager is canonical.
// When committing a transaction, the instance is checked against it; if it isn't the same, a race occurred.
// It is immutable, its version is filled in by the allocator from the AllocVersionProvider
public interface JData extends Serializable {
/**
* Returns the key of the object.
* @return the key of the object
*/
JObjectKey key();
/**
* Returns the estimated size of the object in bytes.
* @return the estimated size of the object in bytes
*/
default int estimateSize() {
return 100;
}
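For illustration, a stored data type only needs to expose its key (and, optionally, a better size estimate). A hedged sketch with a hypothetical record:

// Hypothetical JData implementation, for illustration only.
public record ExampleData(JObjectKey key, byte[] payload) implements JData {
    @Override
    public int estimateSize() {
        return payload.length + 64; // rough guess: payload plus per-object overhead
    }
}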

View File

@@ -1,35 +1,9 @@
package com.usatiuk.objects;
import com.usatiuk.objects.iterators.Data;
/**
* JDataVersionedWrapper is a wrapper for JData that contains its version number
* (the id of the transaction that had changed it last)
*/
public sealed interface JDataVersionedWrapper extends Data<JDataVersionedWrapper> permits JDataVersionedWrapperLazy, JDataVersionedWrapperImpl {
@Override
default JDataVersionedWrapper value() {
return this;
}
/**
* Returns the wrapped object.
*
* @return the wrapped object
*/
public interface JDataVersionedWrapper {
JData data();
/**
* Returns the version number of the object.
*
* @return the version number of the object
*/
long version();
/**
* Returns the estimated size of the object in bytes.
*
* @return the estimated size of the object in bytes
*/
int estimateSize();
}

View File

@@ -4,9 +4,6 @@ import jakarta.annotation.Nonnull;
import java.io.Serializable;
/**
* Simple wrapper for an already-existing JData object with a version.
*/
public record JDataVersionedWrapperImpl(@Nonnull JData data,
long version) implements Serializable, JDataVersionedWrapper {
@Override

View File

@@ -2,47 +2,18 @@ package com.usatiuk.objects;
import java.util.function.Supplier;
/**
* Lazy JDataVersionedWrapper implementation.
* The object is deserialized only when data() is called for the first time.
* It also allows setting a callback to be called when the data is loaded (e.g. to cache it).
*/
public final class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
public class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
private final long _version;
private final int _estimatedSize;
private JData _data;
private Supplier<JData> _producer;
private JData _data;
/**
* Creates a new JDataVersionedWrapperLazy object.
*
* @param version the version number of the object
* @param estimatedSize the estimated size of the object in bytes
* @param producer a supplier that produces the wrapped object
*/
public JDataVersionedWrapperLazy(long version, int estimatedSize, Supplier<JData> producer) {
_version = version;
_estimatedSize = estimatedSize;
_producer = producer;
}
/**
* Set a callback to be called when the data is loaded.
*
* @param cacheCallback the callback to be called
*/
public void setCacheCallback(Runnable cacheCallback) {
if (_data != null) {
throw new IllegalStateException("Cache callback can be set only before data is loaded");
}
var oldProducer = _producer;
_producer = () -> {
var ret = oldProducer.get();
cacheCallback.run();
return ret;
};
}
public JData data() {
if (_data != null)
return _data;

View File

@@ -2,26 +2,17 @@ package com.usatiuk.objects;
import com.google.protobuf.ByteString;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import java.nio.ByteBuffer;
/**
* Serializer for JDataVersionedWrapper objects.
* The objects are stored in a simple format: first is 8-byte long, then the serialized object.
*/
@Singleton
public class JDataVersionedWrapperSerializer {
@ApplicationScoped
public class JDataVersionedWrapperSerializer implements ObjectSerializer<JDataVersionedWrapper> {
@Inject
ObjectSerializer<JData> dataSerializer;
/**
* Serializes a JDataVersionedWrapper object to a ByteString.
*
* @param obj the object to serialize
* @return the serialized object as a ByteString
*/
@Override
public ByteString serialize(JDataVersionedWrapper obj) {
ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
buffer.putLong(obj.version());
@@ -29,17 +20,10 @@ public class JDataVersionedWrapperSerializer {
return ByteString.copyFrom(buffer).concat(dataSerializer.serialize(obj.data()));
}
/**
* Deserializes a JDataVersionedWrapper object from a ByteBuffer.
* Returns a lazy wrapper (JDataVersionedWrapperLazy).
*
* @param data the ByteBuffer containing the serialized object
* @return the deserialized object
*/
public JDataVersionedWrapper deserialize(ByteBuffer data) {
var version = data.getLong();
return new JDataVersionedWrapperLazy(version, data.remaining(),
() -> dataSerializer.deserialize(data)
);
@Override
public JDataVersionedWrapper deserialize(ByteString data) {
var version = data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
var rawData = data.substring(Long.BYTES);
return new JDataVersionedWrapperLazy(version, rawData.size(), () -> dataSerializer.deserialize(rawData));
}
}
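As a worked example of the framing described above (an 8-byte version prefix, big-endian by ByteBuffer's default, followed by the serialized payload), here is a hedged round-trip sketch using the ByteString-based variant shown in this diff; serializer and someJData are assumed to exist.

import com.google.protobuf.ByteString;

// Sketch; assumes it lives alongside the classes shown in this diff.
class WrapperSerializerSketch {
    JData roundTrip(JDataVersionedWrapperSerializer serializer, JData someJData) {
        ByteString bytes = serializer.serialize(new JDataVersionedWrapperImpl(someJData, 7L));
        // Layout: [00 00 00 00 00 00 00 07] ++ dataSerializer.serialize(someJData)
        JDataVersionedWrapper back = serializer.deserialize(bytes);
        assert back.version() == 7L;
        return back.data(); // the payload is deserialized lazily on this first data() call
    }
}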

View File

@@ -5,72 +5,32 @@ import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
/**
* JObjectKey is an interface for object keys to be used in the object store.
*/
public sealed interface JObjectKey extends Serializable, Comparable<JObjectKey> permits JObjectKeyImpl, JObjectKeyMax, JObjectKeyMin {
JObjectKeyMin MIN = new JObjectKeyMin();
JObjectKeyMax MAX = new JObjectKeyMax();
/**
* Creates a new JObjectKey from a string value.
*
* @param value the string value of the key
* @return a new JObjectKey
*/
static JObjectKey of(String value) {
return new JObjectKeyImpl(value);
static JObjectKey of(String name) {
return new JObjectKeyImpl(name);
}
/**
* Creates a new JObjectKey with a random UUID.
*
* @return a new JObjectKey with a random UUID
*/
static JObjectKey random() {
return new JObjectKeyImpl(UUID.randomUUID().toString());
}
/**
* Returns a JObjectKey that compares less than all other keys.
* Calling value on this key will result in an exception.
*
* @return a JObjectKey that compares less than all other keys
*/
static JObjectKey first() {
return MIN;
}
/**
* Returns a JObjectKey that compares greater than all other keys.
* Calling value on this key will result in an exception.
*
* @return a JObjectKey that compares greater than all other keys
*/
static JObjectKey last() {
return MAX;
}
/**
* Creates a new JObjectKey from a byte array.
*
* @param bytes the byte array representing the key
* @return a new JObjectKey
*/
static JObjectKey fromBytes(byte[] bytes) {
return new JObjectKeyImpl(new String(bytes, StandardCharsets.ISO_8859_1));
return new JObjectKeyImpl(new String(bytes, StandardCharsets.UTF_8));
}
/**
* Creates a new JObjectKey from a ByteBuffer.
*
* @param buff the ByteBuffer representing the key
* @return a new JObjectKey
*/
static JObjectKey fromByteBuffer(ByteBuffer buff) {
byte[] bytes = new byte[buff.remaining()];
buff.get(bytes);
return new JObjectKeyImpl(bytes);
return new JObjectKeyImpl(StandardCharsets.UTF_8.decode(buff).toString());
}
@Override
@@ -79,17 +39,9 @@ public sealed interface JObjectKey extends Serializable, Comparable<JObjectKey>
@Override
String toString();
/**
* Returns the byte buffer representation of the key.
*
* @return the byte buffer representation of the key
*/
byte[] bytes();
ByteBuffer toByteBuffer();
/**
* Returns the string value of the key.
*
* @return the string value of the key
*/
String value();
String name();
}
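The MIN/MAX sentinels documented above exist so that range scans over the key space need no special-casing at either end. A small hedged sketch of the ordering:

// Illustration only.
class JObjectKeyOrderingSketch {
    void example() {
        JObjectKey a = JObjectKey.of("a");
        JObjectKey b = JObjectKey.of("b");
        assert a.compareTo(b) < 0;                   // keys order by their string value
        assert JObjectKey.first().compareTo(a) < 0;  // MIN sorts before every real key
        assert JObjectKey.last().compareTo(b) > 0;   // MAX sorts after every real key
        // Asking MIN/MAX for their value or bytes throws UnsupportedOperationException,
        // so they are useful only as range bounds.
    }
}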

View File

@@ -1,34 +1,16 @@
package com.usatiuk.objects;
import com.usatiuk.utils.UninitializedByteBuffer;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import java.io.Serial;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
/**
* A "real" implementation of JObjectKey, containing an underlying string, and a cached lazily created byte buffer.
*/
public final class JObjectKeyImpl implements JObjectKey {
@Serial
private static final long serialVersionUID = 0L;
private final String value;
private transient ByteBuffer _bb = null;
public JObjectKeyImpl(String value) {
this.value = value;
}
public JObjectKeyImpl(byte[] bytes) {
this.value = new String(bytes, StandardCharsets.ISO_8859_1);
}
public record JObjectKeyImpl(String name) implements JObjectKey {
@Override
public int compareTo(JObjectKey o) {
switch (o) {
case JObjectKeyImpl jObjectKeyImpl -> {
return value.compareTo(jObjectKeyImpl.value());
return name.compareTo(jObjectKeyImpl.name());
}
case JObjectKeyMax jObjectKeyMax -> {
return -1;
@@ -41,40 +23,21 @@ public final class JObjectKeyImpl implements JObjectKey {
@Override
public String toString() {
return value;
return name;
}
@Override
public byte[] bytes() {
return name.getBytes(StandardCharsets.UTF_8);
}
@Override
public ByteBuffer toByteBuffer() {
if (_bb != null) return _bb;
synchronized (this) {
if (_bb != null) return _bb;
var bytes = value.getBytes(StandardCharsets.ISO_8859_1);
var directBb = UninitializedByteBuffer.allocate(bytes.length);
directBb.put(bytes);
directBb.flip();
_bb = directBb;
return directBb;
}
var heapBb = StandardCharsets.UTF_8.encode(name);
if (heapBb.isDirect()) return heapBb;
var directBb = UninitializedByteBuffer.allocateUninitialized(heapBb.remaining());
directBb.put(heapBb);
directBb.flip();
return directBb;
}
@Override
public String value() {
return value;
}
@Override
public boolean equals(Object obj) {
if (obj == this) return true;
if (obj == null || obj.getClass() != this.getClass()) return false;
var that = (JObjectKeyImpl) obj;
return Objects.equals(this.value, that.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
}

View File

@@ -2,9 +2,6 @@ package com.usatiuk.objects;
import java.nio.ByteBuffer;
/**
* JObjectKey implementation that compares greater than all other keys.
*/
public record JObjectKeyMax() implements JObjectKey {
@Override
public int compareTo(JObjectKey o) {
@@ -21,13 +18,18 @@ public record JObjectKeyMax() implements JObjectKey {
}
}
@Override
public byte[] bytes() {
throw new UnsupportedOperationException();
}
@Override
public ByteBuffer toByteBuffer() {
throw new UnsupportedOperationException();
}
@Override
public String value() {
public String name() {
throw new UnsupportedOperationException();
}
}

View File

@@ -2,9 +2,6 @@ package com.usatiuk.objects;
import java.nio.ByteBuffer;
/**
* JObjectKey implementation that compares less than all other keys.
*/
public record JObjectKeyMin() implements JObjectKey {
@Override
public int compareTo(JObjectKey o) {
@@ -21,13 +18,18 @@ public record JObjectKeyMin() implements JObjectKey {
}
}
@Override
public byte[] bytes() {
throw new UnsupportedOperationException();
}
@Override
public ByteBuffer toByteBuffer() {
throw new UnsupportedOperationException();
}
@Override
public String value() {
public String name() {
throw new UnsupportedOperationException();
}
}

View File

@@ -2,17 +2,12 @@ package com.usatiuk.objects;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.utils.SerializationHelper;
import com.usatiuk.dhfs.utils.SerializationHelper;
import io.quarkus.arc.DefaultBean;
import jakarta.enterprise.context.ApplicationScoped;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* Simple Java object serializer.
*/
@ApplicationScoped
@DefaultBean
public class JavaDataSerializer implements ObjectSerializer<JData> {
@@ -21,8 +16,9 @@ public class JavaDataSerializer implements ObjectSerializer<JData> {
return SerializationHelper.serialize(obj);
}
public JData deserialize(ByteBuffer data) {
try (var is = UnsafeByteOperations.unsafeWrap(data).newInput()) {
@Override
public JData deserialize(ByteString data) {
try (var is = data.newInput()) {
return SerializationHelper.deserialize(is);
} catch (IOException e) {
throw new RuntimeException(e);

View File

@@ -2,27 +2,8 @@ package com.usatiuk.objects;
import com.google.protobuf.ByteString;
import java.nio.ByteBuffer;
/**
* Interface for serializing and deserializing objects.
*
* @param <T> the type of object to serialize/deserialize
*/
public interface ObjectSerializer<T> {
/**
* Serialize an object to a ByteString.
*
* @param obj the object to serialize
* @return the serialized object as a ByteString
*/
ByteString serialize(T obj);
/**
* Deserialize an object from a ByteBuffer.
*
* @param data the ByteBuffer containing the serialized object
* @return the deserialized object
*/
T deserialize(ByteBuffer data);
T deserialize(ByteString data);
}

View File

@@ -1,70 +1,24 @@
package com.usatiuk.objects.iterators;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import org.apache.commons.lang3.tuple.Pair;
import java.util.Iterator;
/**
* An iterator over key-value pairs that can be closed and supports peek and skip operations, in both directions.
* @param <K> the type of the keys
* @param <V> the type of the values
*/
public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends Iterator<Pair<K, V>>, AutoCloseable {
/**
* Returns the upcoming key in the forward direction without advancing the iterator.
*
* @return the current key
* @throws IllegalStateException if there is no next element
*/
public interface CloseableKvIterator<K extends Comparable<? super K>, V> extends Iterator<Pair<K, V>>, AutoCloseableNoThrow {
K peekNextKey();
/**
* Skips the next element in the forward direction.
*
* @throws IllegalStateException if there is no next element
*/
void skip();
/**
* Returns the upcoming key in the reverse direction without advancing the iterator.
*
* @return the previous key
* @throws IllegalStateException if there is no previous element
*/
K peekPrevKey();
/**
* Returns the key-value pair in the reverse direction, and advances the iterator.
*
* @return the previous key-value pair
* @throws IllegalStateException if there is no previous element
*/
Pair<K, V> prev();
/**
* Checks if there is a previous element in the reverse direction.
*
* @return true if there is a previous element, false otherwise
*/
boolean hasPrev();
/**
* Skips the previous element in the reverse direction.
*
* @throws IllegalStateException if there is no previous element
*/
void skipPrev();
/**
* Returns a reversed iterator that iterates in the reverse direction.
*
* @return a new CloseableKvIterator that iterates in the reverse direction
*/
default CloseableKvIterator<K, V> reversed() {
return new ReversedKvIterator<K, V>(this);
}
@Override
void close();
}
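A hedged sketch of how this bidirectional iterator is typically consumed; the iterator itself is assumed to come from one of the store's iterator factories, which are not shown here.

// Usage sketch only; throws Exception covers the plain-AutoCloseable variant of the interface.
class IteratorUsageSketch {
    void example(CloseableKvIterator<JObjectKey, JDataVersionedWrapper> it) throws Exception {
        try (it) {
            while (it.hasNext()) {
                JObjectKey upcoming = it.peekNextKey(); // look at the next key without consuming it
                var pair = it.next();                   // Pair<JObjectKey, JDataVersionedWrapper>
                // ... process pair ...
            }
            if (it.hasPrev()) {
                var last = it.prev(); // step backwards over the last entry that was returned
            }
        }
    }
}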

View File

@@ -1,13 +1,10 @@
package com.usatiuk.objects.iterators;
/**
* Interface indicating that data is present.
* @param <V> the type of the value
*/
public interface Data<V> extends MaybeTombstone<V> {
/**
* Get the value.
* @return the value
*/
V value();
import java.util.Optional;
public record Data<V>(V value) implements MaybeTombstone<V> {
@Override
public Optional<V> opt() {
return Optional.of(value);
}
}

View File

@@ -1,9 +0,0 @@
package com.usatiuk.objects.iterators;
/**
* Simple implementation of the Data interface.
* @param value the value
* @param <V> the type of the value
*/
public record DataWrapper<V>(V value) implements Data<V> {
}

View File

@@ -0,0 +1,6 @@
package com.usatiuk.objects.iterators;
@FunctionalInterface
public interface IterProdFn<K extends Comparable<K>, V> {
CloseableKvIterator<K, V> get(IteratorStart start, K key);
}

View File

@@ -1,8 +1,5 @@
package com.usatiuk.objects.iterators;
/**
* Allows to specify initial positioning of the iterator relative to the requested key.
*/
public enum IteratorStart {
LT,
LE,

View File

@@ -5,25 +5,11 @@ import org.apache.commons.lang3.tuple.Pair;
import java.util.NoSuchElementException;
import java.util.function.Function;
/**
* A key-value iterator that filters keys based on a predicate.
*
* @param <K> the type of the keys
* @param <V> the type of the values
*/
public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final CloseableKvIterator<K, V> _backing;
private final Function<K, Boolean> _filter;
private K _next;
/**
* Constructs a KeyPredicateKvIterator with the specified backing iterator, start position, and filter.
*
* @param backing the backing iterator
* @param start the starting position relative to the startKey
* @param startKey the starting key
* @param filter the filter function to apply to keys. Only keys for which this function returns true will be included in the iteration.
*/
public KeyPredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<K, Boolean> filter) {
_goingForward = true;
_backing = backing;
@@ -53,20 +39,20 @@ public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends Reversib
}
// switch (start) {
// case LT -> {
//// assert _next == null || _next.getKey().compareTo(startKey) < 0;
// }
// case LE -> {
//// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
// }
// case GT -> {
// assert _next == null || _next.compareTo(startKey) > 0;
// }
// case GE -> {
// assert _next == null || _next.compareTo(startKey) >= 0;
// }
// }
switch (start) {
case LT -> {
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
}
case LE -> {
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
}
case GT -> {
assert _next == null || _next.compareTo(startKey) > 0;
}
case GE -> {
assert _next == null || _next.compareTo(startKey) >= 0;
}
}
}
private void fillNext() {

View File

@@ -4,23 +4,10 @@ import org.apache.commons.lang3.tuple.Pair;
import java.util.function.Function;
/**
* A mapping key-value iterator that transforms the values of a backing iterator using a specified function.
*
* @param <K> the type of the keys
* @param <V> the type of the values in the backing iterator
* @param <V_T> the type of the transformed values
*/
public class MappingKvIterator<K extends Comparable<K>, V, V_T> implements CloseableKvIterator<K, V_T> {
private final CloseableKvIterator<K, V> _backing;
private final Function<V, V_T> _transformer;
/**
* Constructs a MappingKvIterator with the specified backing iterator and transformer function.
*
* @param backing the backing iterator
* @param transformer the function to transform values
*/
public MappingKvIterator(CloseableKvIterator<K, V> backing, Function<V, V_T> transformer) {
_backing = backing;
_transformer = transformer;

View File

@@ -1,8 +1,7 @@
package com.usatiuk.objects.iterators;
/**
* Optional-like interface, can either be {@link Data} or {@link Tombstone}.
* @param <T> the type of the value
*/
import java.util.Optional;
public interface MaybeTombstone<T> {
Optional<T> opt();
}
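Data and Tombstone are the two cases of MaybeTombstone: when layered iterators are merged, a Data entry carries a value, while a Tombstone marks a key as deleted in a newer layer so that values from older layers are ignored. A minimal hedged sketch (the Tombstone type is referenced by the Javadoc above but not shown in this excerpt):

import java.util.Optional;

class TombstoneSketch {
    // Resolve the entry found in the newest layer that knows about a key.
    static <V> Optional<V> resolve(MaybeTombstone<V> entry) {
        if (entry instanceof Data<V> d) {
            return Optional.of(d.value());
        }
        return Optional.empty(); // a tombstone hides any value from older layers
    }
}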

Some files were not shown because too many files have changed in this diff