Mirror of https://github.com/usatiuk/dhfs.git — synced 2025-10-29 04:57:48 +01:00

Compare commits: 1 commit — 686cc550db ... type-itera (commit 7ba219f35e)

.dockerignore — new file (4)
@@ -0,0 +1,4 @@
**/.parcel-cache
**/dist
**/node_modules
**/target
.github/workflows/server.yml (vendored) — 290
@@ -7,6 +7,12 @@ on:
  pull_request:
    branches: ["main"]

env:
  # Use docker.io for Docker Hub if empty
  REGISTRY: ghcr.io
  # github.repository as <account>/<repo>
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-dhfs:
    runs-on: ubuntu-latest
@@ -14,21 +20,26 @@ jobs:
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: "recursive"

      - name: Install sudo for ACT
        run: apt-get update && apt-get install -y sudo
        if: env.ACT=='true'

      - name: Install FUSE
        run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
      - name: Install fuse and maven
        run: sudo apt-get update && sudo apt-get install -y libfuse2

      - name: User allow other for fuse
        run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf
      - name: Download maven
        run: |
          cd "$HOME"
          mkdir maven-bin
          curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
          echo "$HOME"/maven-bin/bin >> $GITHUB_PATH

      - name: Dump fuse.conf
        run: cat /etc/fuse.conf
      - name: Maven info
        run: |
          echo $GITHUB_PATH
          echo $PATH
          mvn -v

      - name: Set up JDK 21
        uses: actions/setup-java@v4
@@ -37,21 +48,16 @@ jobs:
          distribution: "zulu"
          cache: maven

      - name: Build LazyFS
        run: cd thirdparty/lazyfs/ && ./build.sh

      - name: Test with Maven
        run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify javadoc:aggregate
        run: cd dhfs-parent && mvn --batch-mode --update-snapshots package verify

      # - name: Build with Maven
      #   run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG

      - uses: actions/upload-artifact@v4
        with:
          name: DHFS Server Package
          path: dhfs-parent/dhfs-fuse/target/quarkus-app

      - uses: actions/upload-artifact@v4
        with:
          name: DHFS Javadocs
          path: dhfs-parent/target/reports/apidocs/
          path: dhfs-parent/server/target/quarkus-app

      - uses: actions/upload-artifact@v4
        if: ${{ always() }}
@@ -83,12 +89,211 @@ jobs:
          name: Webui
          path: webui/dist

  build-native-libs:
    strategy:
      matrix:
        include:
          - os: ubuntu-latest
            cross: "linux/amd64"
          - os: ubuntu-latest
            cross: "linux/arm64"
          - os: macos-latest

    runs-on: ${{ matrix.os }}
    env:
      DO_LOCAL_BUILD: ${{ matrix.os == 'macos-latest' }}
      DOCKER_PLATFORM: ${{ matrix.cross || 'NATIVE' }}

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set SANITIZED_DOCKER_PLATFORM
        run: echo "SANITIZED_DOCKER_PLATFORM=$(echo $DOCKER_PLATFORM | tr / _ )" >> $GITHUB_ENV

      - name: Set DOCKER_BUILDER_IMAGE
        run: echo "DOCKER_BUILDER_IMAGE=dhfs_lib_builder-${{matrix.os}}-$SANITIZED_DOCKER_PLATFORM" >> $GITHUB_ENV

      - name: Build config
        run: |
          echo DO_LOCAL_BUILD: $DO_LOCAL_BUILD
          echo DOCKER_PLATFORM: $DOCKER_PLATFORM
          echo SANITIZED_DOCKER_PLATFORM: $SANITIZED_DOCKER_PLATFORM
          echo DOCKER_BUILDER_IMAGE: $DOCKER_BUILDER_IMAGE

      - name: Set up JDK 21
        if: ${{ env.DO_LOCAL_BUILD == 'TRUE' }}
        uses: actions/setup-java@v4
        with:
          java-version: "21"
          distribution: "zulu"
          cache: maven

      - name: Set up Docker Buildx
        if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
        uses: docker/setup-buildx-action@v3

      - name: Set up QEMU
        if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
        uses: docker/setup-qemu-action@v3

      - name: Build Docker builder image
        if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
        uses: docker/build-push-action@v5
        with:
          context: ./libdhfs_support/builder
          file: ./libdhfs_support/builder/Dockerfile
          push: false
          platforms: ${{ env.DOCKER_PLATFORM }}
          tags: ${{ env.DOCKER_BUILDER_IMAGE }}
          cache-from: type=gha,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
          cache-to: type=gha,mode=max,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
          load: true

      - name: Build the library
        run: |
          CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" libdhfs_support/builder/cross-build.sh both build "$(pwd)/result"

      - name: Upload build
        uses: actions/upload-artifact@v4
        with:
          name: NativeLib-${{ matrix.os }}-${{ env.SANITIZED_DOCKER_PLATFORM }}
          path: result

  merge-native-libs:
    runs-on: ubuntu-latest
    needs: [build-native-libs]
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Download artifacts
        uses: actions/download-artifact@v4
        with:
          path: downloaded-libs

      - name: Merge all
        run: rsync -av downloaded-libs/NativeLib*/* result/

      - name: Check that libs exists
        run: |
          test -f "result/Linux-x86_64/libdhfs_support.so" || exit 1

      - name: Upload
        uses: actions/upload-artifact@v4
        with:
          name: NativeLibs
          path: result

  publish-docker:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
      # This is used to complete the identity challenge
      # with sigstore/fulcio when running outside of PRs.
      id-token: write

    needs: [build-webui, merge-native-libs, build-dhfs]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Download server package
        uses: actions/download-artifact@v4
        with:
          name: DHFS Server Package
          path: dhfs-package-downloaded

      - name: Download webui
        uses: actions/download-artifact@v4
        with:
          name: Webui
          path: webui-dist-downloaded

      - name: Download native libs
        uses: actions/download-artifact@v4
        with:
          name: NativeLibs
          path: dhfs-native-downloaded

      - name: Show all the files
        run: find .

      # Install the cosign tool except on PR
      # https://github.com/sigstore/cosign-installer
      - name: Install cosign
        if: github.event_name != 'pull_request'
        uses: sigstore/cosign-installer@v3.5.0
        with:
          cosign-release: "v2.2.4"

      # Set up BuildKit Docker container builder to be able to build
      # multi-platform images and export cache
      # https://github.com/docker/setup-buildx-action
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      # Login against a Docker registry except on PR
      # https://github.com/docker/login-action
      - name: Log into registry ${{ env.REGISTRY }}
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Extract metadata (tags, labels) for Docker
      # https://github.com/docker/metadata-action
      - name: Extract Docker metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      # Build and push Docker image with Buildx (don't push on PR)
      # https://github.com/docker/build-push-action
      - name: Build and push Docker image
        id: build-and-push
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./Dockerfile.ci
          push: ${{ github.event_name != 'pull_request' }}
          platforms: linux/amd64,linux/arm64
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      # Sign the resulting Docker image digest except on PRs.
      # This will only write to the public Rekor transparency log when the Docker
      # repository is public to avoid leaking data. If you would like to publish
      # transparency data even for private images, pass --force to cosign below.
      # https://github.com/sigstore/cosign
      - name: Sign the published Docker image
        if: ${{ github.event_name != 'pull_request' }}
        env:
          # https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable
          TAGS: ${{ steps.meta.outputs.tags }}
          DIGEST: ${{ steps.build-and-push.outputs.digest }}
        # This step uses the identity token to provision an ephemeral certificate
        # against the sigstore community Fulcio instance.
        run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST}

  publish-run-wrapper:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
      # This is used to complete the identity challenge
      # with sigstore/fulcio when running outside of PRs.
      id-token: write

    needs: [build-webui, build-dhfs]
    needs: [build-webui, merge-native-libs, build-dhfs]

    steps:
      - name: Checkout repository
@@ -104,6 +309,11 @@ jobs:
          name: Webui
          path: webui-dist-downloaded

      - uses: actions/download-artifact@v4
        with:
          name: NativeLibs
          path: dhfs-native-downloaded

      - name: Show all the files
        run: find .

@@ -111,18 +321,17 @@ jobs:
        run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app

      - name: Copy DHFS
        run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"
        run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"

      - name: Copy Webui
        run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"

      - name: Copy NativeLibs
        run: cp -r ./dhfs-native-downloaded "run-wrapper-out/dhfs/app/NativeLibs"

      - name: Copy run wrapper
        run: cp -r ./run-wrapper/* "run-wrapper-out/dhfs/app/"

      - name: Copy README
        run: |
          cp README.md "run-wrapper-out/dhfs/"

      - name: Add version to run wrapper
        run: echo $GITHUB_RUN_ID > "run-wrapper-out/dhfs/app/"version

@@ -134,36 +343,3 @@ jobs:
        with:
          name: Run wrapper
          path: ~/run-wrapper.tar.gz

  publish-javadoc:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pages: write
      id-token: write

    needs: [build-webui, build-dhfs]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - uses: actions/download-artifact@v4
        with:
          name: DHFS Javadocs
          path: dhfs-javadocs-downloaded

      - name: Setup Pages
        uses: actions/configure-pages@v5

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: "dhfs-javadocs-downloaded"

      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
.gitmodules (vendored) — 3

@@ -1,3 +0,0 @@
[submodule "thirdparty/lazyfs/lazyfs"]
	path = thirdparty/lazyfs/lazyfs
	url = git@github.com:dsrhaslab/lazyfs.git
.vscode/ltex.dictionary.en-US.txt (vendored) — 1

@@ -1 +0,0 @@
Syncthing
Dockerfile — new file (35)

@@ -0,0 +1,35 @@
FROM node:20-bullseye as webui-build

WORKDIR /usr/src/app/webui-build
COPY ./webui/package*.json ./
RUN npm i
COPY ./webui/. .
RUN npm run build

FROM azul/zulu-openjdk:21 as server-build

WORKDIR /usr/src/app/server-build
COPY ./server/.mvn .mvn
COPY ./server/mvnw ./server/pom.xml ./
RUN ./mvnw quarkus:go-offline
# The previous thing still doesn't download 100% everything
RUN ./mvnw -Dmaven.test.skip=true -Dskip.unit=true package --fail-never
COPY ./server/. .
RUN ./mvnw -Dmaven.test.skip=true -Dskip.unit=true clean package

FROM azul/zulu-openjdk-alpine:21-jre-headless

RUN apk update && apk add fuse && rm -rf /var/cache/apk/*

WORKDIR /usr/src/app
COPY --from=server-build /usr/src/app/server-build/target/quarkus-app/. .
RUN mkdir -p webui
COPY --from=webui-build /usr/src/app/webui-build/dist/. ./webui

ENV dhfs_webui_root=/usr/src/app/webui

COPY ./dockerentry.sh .

RUN ["chmod", "+x", "./dockerentry.sh"]

CMD [ "./dockerentry.sh" ]
Dockerfile.ci — new file (24)

@@ -0,0 +1,24 @@
FROM azul/zulu-openjdk:21-jre-headless

RUN apt update && apt install -y libfuse2 && apt-get clean

WORKDIR /usr/src/app

COPY ./dhfs-package-downloaded/lib .
COPY ./dhfs-package-downloaded/*.jar .
COPY ./dhfs-package-downloaded/app .
COPY ./dhfs-package-downloaded/quarkus .

WORKDIR /usr/src/app/native-libs
COPY ./dhfs-native-downloaded/. .
WORKDIR /usr/src/app/webui
COPY ./webui-dist-downloaded/. .

ENV dhfs_webui_root=/usr/src/app/webui

WORKDIR /usr/src/app
COPY ./dockerentry.sh .

RUN ["chmod", "+x", "./dockerentry.sh"]

CMD [ "./dockerentry.sh" ]
README.md — 81

@@ -1,6 +1,4 @@
# Distributed Home File System

[Javadocs](https://usatiuk.github.io/dhfs/)
# Distributed Home File System 🚧

## What is this?

@@ -13,78 +11,9 @@ Syncthing and allowing you to stream your files like Google Drive File Stream

[Download latest build](https://nightly.link/usatiuk/dhfs/workflows/server/main/Run%20wrapper.zip)

This is a simple set of scripts that allows you to run/stop the DHFS server in the background, and update it.
This is a simple wrapper around the jar/web ui distribution that allows you to run/stop the DHFS server in the background, and update itself (hopefully!)

Once unpacked, in the root folder (`dhfs`), there will be 3 folders:
## How to use it and how it works?

- `app` contains the application
- `data` contains the filesystem data storage
- `fuse` is the default filesystem mount point (not on Windows; there, the default mount drive letter is `Z`)

Note that on Windows, the path to the root cannot contain spaces.

## How to use it?

### General prerequisites

Java should be available as `java` in `PATH`, or via a correctly set `JAVA_HOME` (ignored on Windows); Java 21 is required.

The FUSE 2 userspace library should also be available:

- On Ubuntu, the `libfuse2` package can be installed, or an analogous package on other distributions.
- On Windows, [WinFsp](https://winfsp.dev/) should be installed.
- On macOS, [macFUSE](https://macfuse.github.io/).
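
For example, on an apt-based Linux distribution the prerequisites can be checked and installed roughly like this (a minimal sketch; the install command is the one the CI workflow uses, other distributions will differ):

```sh
# Check that Java 21 is available on PATH
java -version

# Install the FUSE 2 userspace library (Ubuntu/Debian)
sudo apt-get update && sudo apt-get install -y libfuse2
```
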
### How to run it?

In the run-wrapper `app` folder, 3 scripts are available; a short usage sketch follows the list.

- `run` script starts the filesystem
- `stop` script stops it
- `update` script will update the filesystem to the newest available CI build
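
A typical session might look like this (a sketch; it assumes the scripts are invoked from the unpacked `dhfs/app` folder on a POSIX system):

```sh
cd dhfs/app
./run      # start the DHFS server in the background
./stop     # stop it
./update   # update to the newest available CI build
```
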

On Windows, PowerShell versions of the scripts should be used. For them to work, it might be necessary to allow execution of unsigned scripts using `set-executionpolicy unrestricted`.

### Additional options

Additional options for the filesystem can be specified in the `extra-opts` file in the same directory as the run scripts.

One line in the `extra-opts` file corresponds to one option passed to the JVM when starting the filesystem.

Some extra possible configuration options are listed below; an example `extra-opts` file follows the list.

- `-Ddhfs.fuse.root=` specifies the root where the filesystem should be mounted. By default, it is the `fuse` path under the `run-wrapper` root. On Windows, it should be a disk root, by default `Z:\`.
- `-Ddhfs.objects.last-seen.timeout=` specifies the period of time (in seconds) after which unavailable peers will be ignored for garbage collection and resynchronized after being reconnected. The default is 43200 (30 days); if set to `-1`, this feature is disabled.
- `-Ddhfs.objects.autosync.download-all=` specifies whether all objects (files and their data) should be downloaded to this peer. `true` or `false`; the default is `false`.
- `-Ddhfs.objects.peerdiscovery.port=` the port to broadcast on and listen to for LAN peer discovery (the default is `42262`).
- `-Ddhfs.objects.peerdiscovery.broadcast=` whether to enable local peer discovery (the default is `true`).
- `-Dquarkus.http.port=` the HTTP port to listen on (the default is `8080`).
- `-Dquarkus.http.ssl-port=` the HTTPS port to listen on (the default is `8443`).
- `-Dquarkus.http.host=` the IP address to listen on (the default is `0.0.0.0`).
- `-Ddhfs.peerdiscovery.static-peers=` allows manually specifying a peer's address in the format `peer id:address:http port:https port`, for example `-Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011`.
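
For example, a hypothetical `extra-opts` file that mounts the filesystem at a custom path, downloads all objects to this peer, and moves the HTTP port could look like this (one JVM option per line; the values are illustrative, not defaults):

```
-Ddhfs.fuse.root=/mnt/dhfs
-Ddhfs.objects.autosync.download-all=true
-Dquarkus.http.port=9080
```
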

On Windows, the entire space for the filesystem should also be preallocated; the `-Ddhfs.objects.persistence.lmdb.size=` option controls the size (the value is in bytes), and on Windows the default is 100 GB.

If errors occur, check `quarkus.log` in the `app` folder, to which the standard output is redirected; on Windows, the error output is separate.

### How to connect to other peers?

A web interface is available at `localhost:8080` (or whatever the HTTP port is) that can be used to connect to other peers. Peers on the local network should be discovered and available to connect to automatically.

## Other notes

### Running tests

To run the LazyFS tests, LazyFS needs to be built: the git submodules need to be cloned and the `./thirdparty/lazyfs/build.sh` script needs to be run, roughly as sketched below.
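
A minimal sketch of building LazyFS and running the tests (the commands are taken from this README and the CI workflow above; the exact Maven goals may differ):

```sh
git submodule update --init --recursive   # clone the git submodules
./thirdparty/lazyfs/build.sh              # build LazyFS
cd dhfs-parent && mvn --batch-mode --update-snapshots package verify
```
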

The LazyFS tests have only been run on Linux.

### Notice

This software was developed with the support of the Faculty of Information Technology, Czech Technical University in Prague, [fit.cvut.cz](https://fit.cvut.cz).

<img src="./docs/logo-fit-en-cerna.svg" height="64">

TODO 😁
dhfs-parent/.gitignore (vendored) — 2

@@ -41,5 +41,3 @@ nb-configuration.xml

# Plugin directory
/.quarkus/cli/plugins/

.jqwik-database
@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
    <module name="dhfs-fuse" />
    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main" />
    <module name="server" />
    <option name="VM_PARAMETERS" value="--add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
    <extension name="coverage">
      <pattern>
        <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
        <option name="PATTERN" value="com.usatiuk.dhfs.*" />
        <option name="ENABLED" value="true" />
      </pattern>
    </extension>
@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
    <module name="dhfs-fuse" />
    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main" />
    <module name="server" />
    <option name="VM_PARAMETERS" value="--add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021" />
    <extension name="coverage">
      <pattern>
        <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
        <option name="PATTERN" value="com.usatiuk.dhfs.*" />
        <option name="ENABLED" value="true" />
      </pattern>
    </extension>
dhfs-parent/autoprotomap/deployment/pom.xml — new file (60)

@@ -0,0 +1,60 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk</groupId>
        <artifactId>autoprotomap-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <artifactId>autoprotomap-deployment</artifactId>
    <name>Autoprotomap - Deployment</name>

    <dependencies>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc-deployment</artifactId>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5-internal</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc-deployment</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <executions>
                    <execution>
                        <id>default-compile</id>
                        <configuration>
                            <annotationProcessorPaths>
                                <path>
                                    <groupId>io.quarkus</groupId>
                                    <artifactId>quarkus-extension-processor</artifactId>
                                    <version>${quarkus.platform.version}</version>
                                </path>
                            </annotationProcessorPaths>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
AutoprotomapProcessor.java — new file (78)

@@ -0,0 +1,78 @@
package com.usatiuk.autoprotomap.deployment;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.arc.deployment.GeneratedBeanBuildItem;
import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.ApplicationIndexBuildItem;
import io.quarkus.gizmo.ClassCreator;
import io.quarkus.gizmo.SignatureBuilder;
import jakarta.inject.Singleton;
import org.jboss.jandex.ClassType;
import org.jboss.jandex.Type;

class AutoprotomapProcessor {
    @BuildStep
    ProtoIndexBuildItem index(ApplicationIndexBuildItem jandex) {
        var ret = new ProtoIndexBuildItem();
        var annot = jandex.getIndex().getAnnotations(ProtoMirror.class);
        for (var a : annot) {
            var protoTarget = jandex.getIndex().getClassByName(((ClassType) a.value().value()).name());
//            if (!messageImplementors.contains(protoTarget))
//                throw new IllegalArgumentException("Expected " + protoTarget + " to be a proto message");
            System.out.println("Found: " + a.name().toString() + " at " + protoTarget.name().toString() + " of " + a.target().asClass().name().toString());
            ret.protoMsgToObj.put(protoTarget, a.target().asClass());
        }
        return ret;
    }

    @BuildStep
    void generateProtoSerializer(ApplicationIndexBuildItem jandex,
                                 ProtoIndexBuildItem protoIndex,
                                 BuildProducer<GeneratedBeanBuildItem> generatedClasses) {
        try {
            for (var o : protoIndex.protoMsgToObj.entrySet()) {
                System.out.println("Generating " + o.getKey().toString() + " -> " + o.getValue().toString());
                var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedClasses);

                var msgType = io.quarkus.gizmo.Type.classType(o.getKey().name());
                var objType = io.quarkus.gizmo.Type.classType(o.getValue().name());

                var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
                        io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
                        msgType, objType);

                var msgJType = Type.create(o.getKey().name(), Type.Kind.CLASS);
                var objJType = Type.create(o.getValue().name(), Type.Kind.CLASS);

                try (ClassCreator classCreator = ClassCreator.builder()
                        .className("com.usatiuk.autoprotomap.generated.for" + o.getKey().simpleName())
                        .signature(SignatureBuilder.forClass().addInterface(type))
                        .classOutput(gizmoAdapter)
                        .setFinal(true)
                        .build()) {
                    classCreator.addAnnotation(Singleton.class);

                    var generator = new ProtoSerializerGenerator(
                            jandex.getIndex(),
                            protoIndex,
                            classCreator,
                            msgJType,
                            objJType
                    );

                    generator.generate();
                }
            }
        } catch (Throwable e) {
            StringBuilder sb = new StringBuilder();
            sb.append(e + "\n");
            for (var el : e.getStackTrace()) {
                sb.append(el.toString() + "\n");
            }
            System.out.println(sb);
        }
    }
}
Constants.java — new file (18)

@@ -0,0 +1,18 @@
package com.usatiuk.autoprotomap.deployment;

public class Constants {
    public static final String FIELD_PREFIX = "_";

    public static String capitalize(String str) {
        return str.substring(0, 1).toUpperCase() + str.substring(1);
    }

    public static String stripPrefix(String str, String prefix) {
        if (str.startsWith(prefix)) {
            return str.substring(prefix.length());
        }
        return str;
    }
}
Effect.java — new file (6)

@@ -0,0 +1,6 @@
package com.usatiuk.autoprotomap.deployment;

@FunctionalInterface
public interface Effect {
    void apply();
}
ProtoIndexBuildItem.java — new file (10)

@@ -0,0 +1,10 @@
package com.usatiuk.autoprotomap.deployment;

import io.quarkus.builder.item.SimpleBuildItem;
import org.apache.commons.collections4.BidiMap;
import org.apache.commons.collections4.bidimap.DualHashBidiMap;
import org.jboss.jandex.ClassInfo;

public final class ProtoIndexBuildItem extends SimpleBuildItem {
    BidiMap<ClassInfo, ClassInfo> protoMsgToObj = new DualHashBidiMap<>();
}
ProtoSerializerGenerator.java — new file (342)

@@ -0,0 +1,342 @@
package com.usatiuk.autoprotomap.deployment;

import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.gizmo.*;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.jboss.jandex.Type;
import org.jboss.jandex.*;
import org.objectweb.asm.Opcodes;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static com.usatiuk.autoprotomap.deployment.Constants.*;

public class ProtoSerializerGenerator {
    private final Index index;
    private final ProtoIndexBuildItem protoIndex;
    private final ClassCreator classCreator;
    private final HashSet<Pair<ClassInfo, ClassInfo>> externalSerializers = new HashSet<>();
    private final Type topMessageType;
    private final Type topObjectType;

    public ProtoSerializerGenerator(Index index, ProtoIndexBuildItem protoIndex, ClassCreator classCreator, Type topMessageType, Type topObjectType) {
        this.index = index;
        this.protoIndex = protoIndex;
        this.classCreator = classCreator;
        this.topMessageType = topMessageType;
        this.topObjectType = topObjectType;
    }

    private FieldDescriptor getOutsideSerializer(ClassInfo messageClass, ClassInfo objectClass) {
        var name = messageClass.name().withoutPackagePrefix() + objectClass.name().withoutPackagePrefix() + "serializer";
        var msgType = io.quarkus.gizmo.Type.classType(messageClass.name());
        var objType = io.quarkus.gizmo.Type.classType(objectClass.name());
        var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
                io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
                msgType, objType);
        var sig = SignatureBuilder.forField().setType(type).build();
        var fd = FieldDescriptor.of(classCreator.getClassName(), name, ProtoSerializer.class);
        if (externalSerializers.add(Pair.of(messageClass, objectClass))) {
            var fc = classCreator.getFieldCreator(fd);
            fc.addAnnotation(Inject.class);
            fc.setSignature(sig);
            fc.setModifiers(Opcodes.ACC_PUBLIC);
        }
        return fd;
    }

    private void traverseHierarchy(Index index, ClassInfo klass, Consumer<ClassInfo> visitor) {
        var cur = klass;
        while (true) {
            visitor.accept(cur);

            var next = cur.superClassType().name();
            if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break;
            cur = index.getClassByName(next);
        }
    }

    private ArrayList<FieldInfo> findAllFields(Index index, ClassInfo klass) {
        ArrayList<FieldInfo> ret = new ArrayList<>();
        traverseHierarchy(index, klass, cur -> {
            ret.addAll(cur.fields());
        });
        return ret;
    }

    private void generateBuilderUse(BytecodeCreator bytecodeCreator,
                                    ResultHandle builder,
                                    Type messageType, Type objectType,
                                    ResultHandle object) {
        var builderType = Type.create(DotName.createComponentized(messageType.name(), "Builder", true), Type.Kind.CLASS);

        var objectClass = index.getClassByName(objectType.name().toString());

        Function<String, String> getterGetter = objectClass.isRecord()
                ? Function.identity()
                : s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX));

        for (var f : findAllFields(index, objectClass)) {
            var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX);

            Supplier<ResultHandle> get = () -> {
                if ((f.flags() & Opcodes.ACC_PUBLIC) != 0)
                    return bytecodeCreator.readInstanceField(f, object);
                else {
                    var fieldGetter = getterGetter.apply(f.name());
                    return bytecodeCreator.invokeVirtualMethod(
                            MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object);
                }
            };

            Effect doSimpleCopy = () -> {
                var setter = MethodDescriptor.ofMethod(builderType.name().toString(), "set" + capitalize(consideredFieldName),
                        builderType.name().toString(), f.type().toString());

                var val = get.get();
                bytecodeCreator.invokeVirtualMethod(setter, builder, val);
            };

            switch (f.type().kind()) {
                case CLASS -> {
                    if (f.type().equals(Type.create(String.class)) || f.type().equals(Type.create(ByteString.class))) {
                        doSimpleCopy.apply();
                    } else {
                        var builderGetter = "get" + capitalize(f.name()) + "Builder";
                        var protoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(f.type().name()));
                        var nestedBuilderType = Type.create(DotName.createComponentized(protoType.name(), "Builder", true), Type.Kind.CLASS);
                        var nestedBuilder = bytecodeCreator.invokeVirtualMethod(
                                MethodDescriptor.ofMethod(builderType.toString(), builderGetter, nestedBuilderType.name().toString()), builder);

                        var val = get.get();

                        generateBuilderUse(bytecodeCreator, nestedBuilder, Type.create(protoType.name(), Type.Kind.CLASS), f.type(), val);
                    }
                }
                case PRIMITIVE -> {
                    doSimpleCopy.apply();
                }
                case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
                case PARAMETERIZED_TYPE ->
                        throw new UnsupportedOperationException("Parametrized types not supported yet");
                case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
                default -> throw new IllegalStateException("Unexpected type: " + f.type());
            }
        }
    }

    private ResultHandle generateConstructorUse(
            BytecodeCreator bytecodeCreator,
            ClassCreator classCreator,
            Type messageType, Type objectType,
            ResultHandle message
    ) {
        var constructor = findAllArgsConstructor(index, index.getClassByName(objectType.name()));
        if (constructor == null) {
            throw new IllegalStateException("No constructor found for type: " + objectType.name());
        }
        var argMap = new ResultHandle[constructor.parametersCount()];

        for (int i = 0; i < argMap.length; i++) {
            var type = constructor.parameterType(i);
            var strippedName = stripPrefix(constructor.parameterName(i), FIELD_PREFIX);

            IntConsumer doSimpleCopy = (arg) -> {
                var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
                        type.name().toString());
                argMap[arg] = bytecodeCreator.invokeVirtualMethod(call, message);
            };

            switch (type.kind()) {
                case CLASS -> {
                    if (type.equals(Type.create(String.class)) || type.equals(Type.create(ByteString.class))) {
                        doSimpleCopy.accept(i);
                    } else {
                        var nestedProtoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(type.name()));
                        var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
                                nestedProtoType.name().toString());
                        var nested = bytecodeCreator.invokeVirtualMethod(call, message);
                        argMap[i] = generateConstructorUse(bytecodeCreator, classCreator, Type.create(nestedProtoType.name(), Type.Kind.CLASS), type, nested);
                    }
                }
                case PRIMITIVE -> {
                    doSimpleCopy.accept(i);
                }
                case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
                case PARAMETERIZED_TYPE ->
                        throw new UnsupportedOperationException("Parametrized types not supported yet");
                case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
                default -> throw new IllegalStateException("Unexpected type: " + type);
            }
        }

        return bytecodeCreator.newInstance(constructor, argMap);
    }

    private MethodInfo findAllArgsConstructor(Index index, ClassInfo klass) {
        ArrayList<FieldInfo> fields = findAllFields(index, klass);

        var fieldCount = fields.size();
        var fieldNames = fields.stream().map(f -> stripPrefix(f.name(), FIELD_PREFIX)).sorted().toList();
        var fieldNameToType = fields.stream().collect(Collectors.toMap(f -> stripPrefix(f.name(), FIELD_PREFIX), FieldInfo::type));

        for (var m : klass.constructors()) {
            if (m.parametersCount() != fieldCount) continue;
            var parameterNames = m.parameters().stream().map(n -> stripPrefix(n.name(), FIELD_PREFIX)).sorted().toList();
            if (!Objects.equals(fieldNames, parameterNames)) continue;

            // Skip this constructor if any parameter's type doesn't match
            // the type of the field with the same (stripped) name.
            boolean typesMatch = true;
            for (var p : m.parameters()) {
                if (!Objects.equals(fieldNameToType.get(stripPrefix(p.name(), FIELD_PREFIX)), p.type())) {
                    typesMatch = false;
                    break;
                }
            }
            if (!typesMatch) continue;

            return m;
        }

        return null;
    }

    public void generateAbstract() {
        var kids = Stream.concat(index.getAllKnownSubclasses(topObjectType.name()).stream(),
                        index.getAllKnownImplementors(topObjectType.name()).stream())
                .filter(k -> !k.isAbstract() && !k.isInterface()).toList();

        try (MethodCreator method = classCreator.getMethodCreator("serialize",
                Message.class, Object.class)) {

            method.setModifiers(Opcodes.ACC_PUBLIC);

            var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);

            var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));

            var arg = method.getMethodParam(0);

            for (var nestedObjClass : kids) {
                System.out.println("Generating " + nestedObjClass.name() + " serializer for " + topObjectType.name());
                var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
                var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
                boolean doExternalCall = false;
                if (nestedMessageClass == null) {
                    var msgInfo = index.getClassByName(topMessageType.name());
                    nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
                    doExternalCall = true;
                }
                var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);

                var statement = method.ifTrue(method.instanceOf(arg, nestedObjClass.name().toString()));

                try (var branch = statement.trueBranch()) {
                    if (doExternalCall) {
                        var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
                        var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
                        var serialized = branch.invokeInterfaceMethod(
                                MethodDescriptor.ofMethod(ProtoSerializer.class,
                                        "serialize", Message.class, Object.class),
                                serializerLoaded, arg);
                        branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
                                "set" + capitalize(nestedObjType.name().withoutPackagePrefix()),
                                builderType.name().toString(), nestedMessageType.name().toString()), builder, serialized);
                    } else {
                        var nestedBuilderType = Type.create(DotName.createComponentized(nestedMessageType.name(), "Builder", true), Type.Kind.CLASS);
                        var nestedBuilder = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
                                "get" + capitalize(nestedObjType.name().withoutPackagePrefix()) + "Builder",
                                nestedBuilderType.name().toString()), builder);
                        generateBuilderUse(branch, nestedBuilder, nestedMessageType, nestedObjType, arg);
                    }
                    var result = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
                    branch.returnValue(result);
                }
            }
            method.throwException(IllegalArgumentException.class, "Unknown object type");
        }

        try (MethodCreator method = classCreator.getMethodCreator("deserialize",
                Object.class, Message.class)) {
            method.setModifiers(Opcodes.ACC_PUBLIC);
            var arg = method.getMethodParam(0);

            for (var nestedObjClass : kids) {
                System.out.println("Generating " + nestedObjClass.name() + " deserializer for " + topObjectType.name());
                var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);

                var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
                boolean doExternalCall = false;
                if (nestedMessageClass == null) {
                    var msgInfo = index.getClassByName(topMessageType.name());
                    nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
                    doExternalCall = true;
                }

                var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);

                var typeCheck = method.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
                        "has" + capitalize(nestedObjType.name().withoutPackagePrefix()), boolean.class), arg);

                var statement = method.ifTrue(typeCheck);

                try (var branch = statement.trueBranch()) {
                    var nestedMessage = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
                            "get" + capitalize(nestedObjType.name().withoutPackagePrefix()), nestedMessageType.name().toString()), arg);
                    if (doExternalCall) {
                        var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
                        var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
                        branch.returnValue(branch.invokeInterfaceMethod(
                                MethodDescriptor.ofMethod(ProtoSerializer.class,
                                        "deserialize", Object.class, Message.class),
                                serializerLoaded, nestedMessage));
                    } else {
                        branch.returnValue(generateConstructorUse(branch, classCreator, nestedMessageType, nestedObjType, nestedMessage));
                    }
                }
            }
            method.throwException(IllegalArgumentException.class, "Unknown object type");
        }
    }

    public void generate() {
        var objInfo = index.getClassByName(topObjectType.name());
        if (objInfo.isAbstract() || objInfo.isInterface()) {
            generateAbstract();
            return;
        }

        try (MethodCreator method = classCreator.getMethodCreator("serialize",
                Message.class, Object.class)) {

            method.setModifiers(Opcodes.ACC_PUBLIC);

            var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);

            var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));

            var arg = method.getMethodParam(0);

            generateBuilderUse(method, builder, topMessageType, topObjectType, arg);

            var result = method.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);

            method.returnValue(result);
        }

        try (MethodCreator method = classCreator.getMethodCreator("deserialize",
                Object.class, Message.class)) {
            method.setModifiers(Opcodes.ACC_PUBLIC);

            var arg = method.getMethodParam(0);

            method.returnValue(generateConstructorUse(method, classCreator, topMessageType, topObjectType, arg));
        }
    }
}
AutoprotomapDevModeTest.java — new file (22)

@@ -0,0 +1,22 @@
package com.usatiuk.autoprotomap.test;

import io.quarkus.test.QuarkusDevModeTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

public class AutoprotomapDevModeTest {

    // Start hot reload (DevMode) test with your extension loaded
    @RegisterExtension
    static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest()
            .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));

    @Test
    public void writeYourOwnDevModeTest() {
        // Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information
        Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName());
    }
}
AutoprotomapTest.java — new file (22)

@@ -0,0 +1,22 @@
package com.usatiuk.autoprotomap.test;

import io.quarkus.test.QuarkusUnitTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

public class AutoprotomapTest {

    // Start unit test with your extension loaded
    @RegisterExtension
    static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
            .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));

    @Test
    public void writeYourOwnUnitTest() {
        // Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information
        Assertions.assertTrue(true, "Add some assertions to " + getClass().getName());
    }
}
dhfs-parent/autoprotomap/integration-tests/pom.xml — new file (107)

@@ -0,0 +1,107 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk</groupId>
        <artifactId>autoprotomap-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <artifactId>autoprotomap-integration-tests</artifactId>
    <name>Autoprotomap - Integration Tests</name>

    <properties>
        <skipITs>true</skipITs>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap-deployment</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>io.quarkus</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
                <executions>
                    <execution>
                        <goals>
                            <goal>build</goal>
                            <goal>generate-code</goal>
                            <goal>generate-code-tests</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <artifactId>maven-failsafe-plugin</artifactId>
                <executions>
                    <execution>
                        <goals>
                            <goal>integration-test</goal>
                            <goal>verify</goal>
                        </goals>
                    </execution>
                </executions>
                <configuration>
                    <systemPropertyVariables>
                        <native.image.path>${project.build.directory}/${project.build.finalName}-runner</native.image.path>
                        <java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
                        <maven.home>${maven.home}</maven.home>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <profiles>
        <profile>
            <id>native-image</id>
            <activation>
                <property>
                    <name>native</name>
                </property>
            </activation>
            <build>
                <plugins>
                    <plugin>
                        <artifactId>maven-surefire-plugin</artifactId>
                        <configuration>
                            <skipTests>${native.surefire.skip}</skipTests>
                        </configuration>
                    </plugin>
                </plugins>
            </build>
            <properties>
                <skipITs>false</skipITs>
                <quarkus.native.enabled>true</quarkus.native.enabled>
            </properties>
        </profile>
    </profiles>
</project>
AbstractObject.java — new file (7)

@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(AbstractProto.class)
public abstract class AbstractObject {
}
CustomObject.java — new file (10)

@@ -0,0 +1,10 @@
package com.usatiuk.autoprotomap.it;

import lombok.AllArgsConstructor;
import lombok.Getter;

@AllArgsConstructor
@Getter
public class CustomObject extends AbstractObject {
    public int testNum = 0;
}
CustomObjectSerializer.java — new file (17)

@@ -0,0 +1,17 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Singleton;

@Singleton
public class CustomObjectSerializer implements ProtoSerializer<CustomObjectProto, CustomObject> {
    @Override
    public CustomObject deserialize(CustomObjectProto message) {
        return new CustomObject(2);
    }

    @Override
    public CustomObjectProto serialize(CustomObject object) {
        return CustomObjectProto.newBuilder().setTest(1).build();
    }
}
InterfaceObject.java — new file (8)

@@ -0,0 +1,8 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(InterfaceObjectProto.class)
public interface InterfaceObject {
    String key();
}
NestedObject.java — new file (15)

@@ -0,0 +1,15 @@
package com.usatiuk.autoprotomap.it;

import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;

@ProtoMirror(NestedObjectProto.class)
@AllArgsConstructor
@Getter
public class NestedObject extends AbstractObject {
    public SimpleObject object;
    public String _nestedName;
    public ByteString _nestedSomeBytes;
}
RecordObject.java — new file (7)

@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(RecordObjectProto.class)
public record RecordObject(String key) implements InterfaceObject {
}
RecordObject2.java — new file (7)

@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(RecordObject2Proto.class)
public record RecordObject2(String key, int value) implements InterfaceObject {
}
SimpleObject.java — new file (15)

@@ -0,0 +1,15 @@
package com.usatiuk.autoprotomap.it;

import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;

@ProtoMirror(SimpleObjectProto.class)
@AllArgsConstructor
@Getter
public class SimpleObject extends AbstractObject {
    public int numfield = 0;
    private String name;
    public ByteString someBytes;
}
@@ -0,0 +1,47 @@
syntax = "proto3";

option java_multiple_files = true;
option java_package = "com.usatiuk.autoprotomap.it";
option java_outer_classname = "TestProto";

package autoprotomap.test;

message SimpleObjectProto {
    int32 numfield = 1;
    string name = 2;
    bytes someBytes = 3;
}

message NestedObjectProto {
    SimpleObjectProto object = 1;
    string nestedName = 2;
    bytes nestedSomeBytes = 3;
}

message CustomObjectProto {
    int64 test = 1;
}

message AbstractProto {
    oneof obj {
        NestedObjectProto nestedObject = 1;
        SimpleObjectProto simpleObject = 2;
        CustomObjectProto customObject = 3;
    }
}

message RecordObjectProto {
    string key = 1;
}

message RecordObject2Proto {
    string key = 1;
    int32 value = 2;
}

message InterfaceObjectProto {
    oneof obj {
        RecordObjectProto recordObject = 1;
        RecordObject2Proto recordObject2 = 2;
    }
}
@@ -0,0 +1 @@
quarkus.package.jar.decompiler.enabled=true
AutoprotomapResourceIT.java — new file (7)

@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;

import io.quarkus.test.junit.QuarkusIntegrationTest;

@QuarkusIntegrationTest
public class AutoprotomapResourceIT extends AutoprotomapResourceTest {
}
AutoprotomapResourceTest.java — new file (113)

@@ -0,0 +1,113 @@
package com.usatiuk.autoprotomap.it;

import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.test.junit.QuarkusTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

@QuarkusTest
public class AutoprotomapResourceTest {
    @Inject
    ProtoSerializer<SimpleObjectProto, SimpleObject> simpleProtoSerializer;
    @Inject
    ProtoSerializer<NestedObjectProto, NestedObject> nestedProtoSerializer;
    @Inject
    ProtoSerializer<AbstractProto, AbstractObject> abstractProtoSerializer;
    @Inject
    ProtoSerializer<InterfaceObjectProto, InterfaceObject> interfaceProtoSerializer;

    @Test
    public void testSimple() {
        var ret = simpleProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
        Assertions.assertEquals(1234, ret.getNumfield());
        Assertions.assertEquals("simple test", ret.getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSomeBytes());

        var des = simpleProtoSerializer.deserialize(ret);
        Assertions.assertEquals(1234, des.getNumfield());
        Assertions.assertEquals("simple test", des.getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
    }

    @Test
    public void testNested() {
        var ret = nestedProtoSerializer.serialize(
                new NestedObject(
                        new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
                        "nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
        Assertions.assertEquals(333, ret.getObject().getNumfield());
        Assertions.assertEquals("nested so", ret.getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", ret.getNestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedSomeBytes());

        var des = nestedProtoSerializer.deserialize(ret);
        Assertions.assertEquals(333, des.object.numfield);
        Assertions.assertEquals(333, des.getObject().getNumfield());
        Assertions.assertEquals("nested so", des.getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", des.get_nestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
    }

    @Test
    public void testAbstractSimple() {
        var ret = abstractProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
        Assertions.assertEquals(1234, ret.getSimpleObject().getNumfield());
        Assertions.assertEquals("simple test", ret.getSimpleObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSimpleObject().getSomeBytes());

        var des = (SimpleObject) abstractProtoSerializer.deserialize(ret);
        Assertions.assertEquals(1234, des.getNumfield());
        Assertions.assertEquals("simple test", des.getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
    }

    @Test
    public void testAbstractCustom() {
        var ret = abstractProtoSerializer.serialize(new CustomObject(1234));
|
||||
Assertions.assertEquals(1, ret.getCustomObject().getTest());
|
||||
|
||||
var des = (CustomObject) abstractProtoSerializer.deserialize(ret);
|
||||
Assertions.assertEquals(2, des.getTestNum());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAbstractNested() {
|
||||
var ret = abstractProtoSerializer.serialize(
|
||||
new NestedObject(
|
||||
new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
|
||||
"nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
|
||||
Assertions.assertEquals(333, ret.getNestedObject().getObject().getNumfield());
|
||||
Assertions.assertEquals("nested so", ret.getNestedObject().getObject().getName());
|
||||
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getNestedObject().getObject().getSomeBytes());
|
||||
Assertions.assertEquals("nested obj", ret.getNestedObject().getNestedName());
|
||||
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedObject().getNestedSomeBytes());
|
||||
|
||||
var des = (NestedObject) abstractProtoSerializer.deserialize(ret);
|
||||
Assertions.assertEquals(333, des.object.numfield);
|
||||
Assertions.assertEquals(333, des.getObject().getNumfield());
|
||||
Assertions.assertEquals("nested so", des.getObject().getName());
|
||||
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
|
||||
Assertions.assertEquals("nested obj", des.get_nestedName());
|
||||
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInterface() {
|
||||
var ret = interfaceProtoSerializer.serialize(new RecordObject("record test"));
|
||||
Assertions.assertEquals("record test", ret.getRecordObject().getKey());
|
||||
var des = (RecordObject) interfaceProtoSerializer.deserialize(ret);
|
||||
Assertions.assertEquals("record test", des.key());
|
||||
|
||||
var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234));
|
||||
Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey());
|
||||
Assertions.assertEquals(1234, ret2.getRecordObject2().getValue());
|
||||
var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2);
|
||||
Assertions.assertEquals("record test 2", des2.key());
|
||||
Assertions.assertEquals(1234, des2.value());
|
||||
}
|
||||
}
24
dhfs-parent/autoprotomap/pom.xml
Normal file
@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk.dhfs</groupId>
        <artifactId>parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <groupId>com.usatiuk</groupId>
    <artifactId>autoprotomap-parent</artifactId>
    <version>1.0-SNAPSHOT</version>
    <packaging>pom</packaging>
    <name>Autoprotomap - Parent</name>

    <modules>
        <module>deployment</module>
        <module>runtime</module>
        <module>integration-tests</module>
    </modules>

</project>
63
dhfs-parent/autoprotomap/runtime/pom.xml
Normal file
@@ -0,0 +1,63 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk</groupId>
        <artifactId>autoprotomap-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <artifactId>autoprotomap</artifactId>
    <name>Autoprotomap - Runtime</name>

    <dependencies>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>io.quarkus</groupId>
                <artifactId>quarkus-extension-maven-plugin</artifactId>
                <version>${quarkus.platform.version}</version>
                <executions>
                    <execution>
                        <phase>compile</phase>
                        <goals>
                            <goal>extension-descriptor</goal>
                        </goals>
                        <configuration>
                            <deployment>${project.groupId}:${project.artifactId}-deployment:${project.version}
                            </deployment>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <executions>
                    <execution>
                        <id>default-compile</id>
                        <configuration>
                            <annotationProcessorPaths>
                                <path>
                                    <groupId>io.quarkus</groupId>
                                    <artifactId>quarkus-extension-processor</artifactId>
                                    <version>${quarkus.platform.version}</version>
                                </path>
                            </annotationProcessorPaths>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
@@ -0,0 +1,12 @@
package com.usatiuk.autoprotomap.runtime;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.CLASS)
@Target(ElementType.TYPE)
public @interface ProtoMirror {
    Class<?> value() default Object.class;
}
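A minimal usage sketch for the annotation, assuming the deployment module generates a ProtoSerializer bean per annotated type (this is what the integration tests above inject):

    // Hedged sketch: MyObjectProto stands in for a generated protobuf message class.
    @ProtoMirror(MyObjectProto.class)
    public record MyObject(String key) {}

    // In CDI-managed code, the generated bean can then be injected:
    @Inject
    ProtoSerializer<MyObjectProto, MyObject> myObjectSerializer;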
@@ -1,4 +1,4 @@
-package com.usatiuk.dhfs;
+package com.usatiuk.autoprotomap.runtime;

import com.google.protobuf.Message;
@@ -0,0 +1,9 @@
name: Autoprotomap
#description: Do something useful.
metadata:
#  keywords:
#    - autoprotomap
#  guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension
#  categories:
#    - "miscellaneous"
#  status: "preview"
@@ -1,127 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.usatiuk.dhfs</groupId>
    <artifactId>dhfs-fs</artifactId>
    <version>1.0-SNAPSHOT</version>

    <parent>
        <groupId>com.usatiuk.dhfs</groupId>
        <artifactId>parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <dependencies>
        <dependency>
            <groupId>org.testcontainers</groupId>
            <artifactId>testcontainers</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.awaitility</groupId>
            <artifactId>awaitility</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-security</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-scheduler</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
        </dependency>
        <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
        </dependency>
        <dependency>
            <groupId>org.jboss.slf4j</groupId>
            <artifactId>slf4j-jboss-logmanager</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
        </dependency>
        <dependency>
            <groupId>org.pcollections</groupId>
            <artifactId>pcollections</artifactId>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>sync-base</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <configuration>
                    <forkCount>1C</forkCount>
                    <reuseForks>false</reuseForks>
                    <parallel>classes</parallel>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            false
                        </junit.jupiter.execution.parallel.enabled>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-failsafe-plugin</artifactId>
                <configuration>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            true
                        </junit.jupiter.execution.parallel.enabled>
                        <junit.jupiter.execution.parallel.mode.default>
                            concurrent
                        </junit.jupiter.execution.parallel.mode.default>
                        <junit.jupiter.execution.parallel.config.dynamic.factor>
                            0.5
                        </junit.jupiter.execution.parallel.config.dynamic.factor>
                        <junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
                        <junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
                <groupId>${quarkus.platform.group-id}</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
                <version>${quarkus.platform.version}</version>
                <extensions>true</extensions>
                <executions>
                    <execution>
                        <id>quarkus-plugin</id>
                        <goals>
                            <goal>generate-code</goal>
                            <goal>generate-code-tests</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
@@ -1,18 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;

/**
 * ChunkData is a data structure that represents an immutable binary blob
 * @param key  unique key
 * @param data binary data
 */
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
    @Override
    public int estimateSize() {
        return data.size();
    }
}
@@ -1,64 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jmap.JMapHolder;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;

import java.util.Collection;
import java.util.Set;

/**
 * File is a data structure that represents a file in the file system
 *
 * @param key     unique key
 * @param mode    file mode
 * @param cTime   inode modification time
 * @param mTime   modification time
 * @param symlink true if the file is a symlink, false otherwise
 */
public record File(JObjectKey key, long mode, long cTime, long mTime,
                   boolean symlink
) implements JDataRemote, JMapHolder<JMapLongKey> {
    public File withSymlink(boolean symlink) {
        return new File(key, mode, cTime, mTime, symlink);
    }

    public File withMode(long mode) {
        return new File(key, mode, cTime, mTime, symlink);
    }

    public File withCTime(long cTime) {
        return new File(key, mode, cTime, mTime, symlink);
    }

    public File withMTime(long mTime) {
        return new File(key, mode, cTime, mTime, symlink);
    }

    public File withCurrentMTime() {
        return new File(key, mode, cTime, System.currentTimeMillis(), symlink);
    }

    public File withCurrentCTime() {
        return new File(key, mode, System.currentTimeMillis(), mTime, symlink);
    }

    @Override
    public Collection<JObjectKey> collectRefsTo() {
        return Set.of();
//        return Set.copyOf(chunks().values());
    }

    @Override
    public int estimateSize() {
        return 64;
//        return chunks.size() * 64;
    }

    @Override
    public Class<? extends JDataRemoteDto> dtoClass() {
        return FileDto.class;
    }
}
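Since File is an immutable record, every update goes through a with* copy; a small sketch of the pattern (types as defined above):

    // Each with* call returns a fresh File; the original value is never mutated.
    long now = System.currentTimeMillis();
    File f = new File(JObjectKey.of("ino-1"), 0644, now, now, false);
    File touched = f.withMode(0600).withCurrentMTime();   // f itself is unchanged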
@@ -1,20 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;

import java.util.List;

/**
 * FileDto is a data transfer object that contains a file and its chunks.
 * @param file   the file
 * @param chunks the list of chunks, each represented as a pair of a long and a JObjectKey
 */
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
    @Override
    public Class<? extends JDataRemote> objClass() {
        return File.class;
    }
}
@@ -1,27 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.syncmap.DtoMapper;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;

/**
 * Maps a {@link File} object to a {@link FileDto} object and vice versa.
 */
@ApplicationScoped
public class FileDtoMapper implements DtoMapper<File, FileDto> {
    @Inject
    JMapHelper jMapHelper;
    @Inject
    FileHelper fileHelper;

    @Override
    public FileDto toDto(File obj) {
        return new FileDto(obj, fileHelper.getChunks(obj));
    }

    @Override
    public File fromDto(FileDto dto) {
        throw new UnsupportedOperationException();
    }
}
@@ -1,52 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.objects.JObjectKey;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;

import java.util.ArrayList;
import java.util.List;

/**
 * Helper class for working with files.
 */
@ApplicationScoped
public class FileHelper {
    @Inject
    JMapHelper jMapHelper;

    /**
     * Get the chunks of a file.
     * Transaction is expected to be already started.
     * @param file the file to get chunks from
     * @return a list of pairs of chunk offset and chunk key
     */
    public List<Pair<Long, JObjectKey>> getChunks(File file) {
        ArrayList<Pair<Long, JObjectKey>> chunks = new ArrayList<>();
        try (var it = jMapHelper.getIterator(file)) {
            while (it.hasNext()) {
                var cur = it.next();
                chunks.add(Pair.of(cur.getKey().key(), cur.getValue().ref()));
            }
        }
        return List.copyOf(chunks);
    }

    /**
     * Replace the chunks of a file.
     * All previous chunks will be deleted.
     * Transaction is expected to be already started.
     * @param file   the file to replace chunks in
     * @param chunks the list of pairs of chunk offset and chunk key
     */
    public void replaceChunks(File file, List<Pair<Long, JObjectKey>> chunks) {
        jMapHelper.deleteAll(file);

        for (var f : chunks) {
            jMapHelper.put(file, JMapLongKey.of(f.getLeft()), f.getRight());
        }
    }
}
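A sketch of how the two helpers compose, assuming a transaction is already open as the javadoc requires (the chunk keys are placeholders):

    // Hedged sketch: rewrite a file's chunk map, then read it back.
    var chunks = List.of(
            Pair.of(0L, JObjectKey.of("chunk-a")),       // hypothetical chunk keys
            Pair.of(4096L, JObjectKey.of("chunk-b")));
    fileHelper.replaceChunks(file, chunks);
    assert fileHelper.getChunks(file).equals(chunks);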
@@ -1,242 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
import com.usatiuk.dhfs.remoteobj.*;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.transaction.Transaction;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.pcollections.HashPMap;
import org.pcollections.HashTreePMap;
import org.pcollections.PMap;

import javax.annotation.Nullable;
import java.util.List;
import java.util.Objects;

/**
 * Handles synchronization of file objects.
 */
@ApplicationScoped
public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
    @Inject
    Transaction curTx;
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    JMapHelper jMapHelper;
    @Inject
    RemoteTransaction remoteTx;
    @Inject
    FileHelper fileHelper;

    @Inject
    JKleppmannTreeManager jKleppmannTreeManager;
    @Inject
    DhfsFileService fileService;

    private JKleppmannTreeManager.JKleppmannTree getTree() {
        return jKleppmannTreeManager.getTree(JObjectKey.of("fs")).orElseThrow();
    }

    /**
     * Resolve conflict between two file versions, update the file in storage and create a conflict file.
     *
     * @param from              the peer that sent the update
     * @param key               the key of the file
     * @param receivedChangelog the changelog of the received file
     * @param receivedData      the received file data
     */
    private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
                                 @Nullable FileDto receivedData) {
        var oursCurMeta = curTx.get(RemoteObjectMeta.class, key).orElse(null);

        if (!oursCurMeta.knownType().isAssignableFrom(File.class))
            throw new IllegalStateException("Object type mismatch: " + oursCurMeta.knownType() + " vs " + File.class);

        if (!oursCurMeta.knownType().equals(File.class))
            oursCurMeta = oursCurMeta.withKnownType(File.class);

        curTx.put(oursCurMeta);

        var oursCurFile = remoteTx.getDataLocal(File.class, key).orElse(null);
        if (oursCurFile == null)
            throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));

        var theirsFile = receivedData.file();

        var oursChunks = fileHelper.getChunks(oursCurFile);

        File first;
        File second;
        List<Pair<Long, JObjectKey>> firstChunks;
        List<Pair<Long, JObjectKey>> secondChunks;
        PeerId otherHostname;

        if (oursCurFile.mTime() >= theirsFile.mTime()) {
            first = oursCurFile;
            firstChunks = oursChunks;
            second = theirsFile;
            secondChunks = receivedData.chunks();
            otherHostname = from;
        } else {
            second = oursCurFile;
            secondChunks = oursChunks;
            first = theirsFile;
            firstChunks = receivedData.chunks();
            otherHostname = persistentPeerDataService.getSelfUuid();
        }

        Log.tracev("Conflict resolution: ours: {0}, theirs: {1}, chunks: {2}, {3}", oursCurFile, theirsFile, oursChunks, receivedData.chunks());
        Log.tracev("Conflict resolution: first: {0}, second: {1}, chunks: {2}, {3}", first, second, firstChunks, secondChunks);

        HashPMap<PeerId, Long> newChangelog = HashTreePMap.from(oursCurMeta.changelog());

        for (var entry : receivedChangelog.entrySet()) {
            newChangelog = newChangelog.plus(entry.getKey(),
                    Long.max(newChangelog.getOrDefault(entry.getKey(), 0L), entry.getValue())
            );
        }

        oursCurMeta = oursCurMeta.withChangelog(newChangelog);
        curTx.put(oursCurMeta);

        boolean chunksDiff = !Objects.equals(firstChunks, secondChunks);

        boolean wasChanged = first.mTime() != second.mTime()
                || first.cTime() != second.cTime()
                || first.mode() != second.mode()
                || first.symlink() != second.symlink()
                || chunksDiff;

        if (wasChanged) {
            oursCurMeta = oursCurMeta.withChangelog(
                    newChangelog.plus(persistentPeerDataService.getSelfUuid(), newChangelog.getOrDefault(persistentPeerDataService.getSelfUuid(), 0L) + 1)
            );
            curTx.put(oursCurMeta);

            remoteTx.putDataRaw(oursCurFile.withCTime(first.cTime()).withMTime(first.mTime()).withMode(first.mode()).withSymlink(first.symlink()));
            fileHelper.replaceChunks(oursCurFile, firstChunks);

            var newFile = new File(JObjectKey.random(), second.mode(), second.cTime(), second.mTime(), second.symlink());
            remoteTx.putData(newFile);
            fileHelper.replaceChunks(newFile, secondChunks);

            var parent = fileService.inoToParent(oursCurFile.key());

            int i = 0;

            do {
                try {
                    getTree().move(parent.getRight(),
                            new JKleppmannTreeNodeMetaFile(
                                    parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + otherHostname.toString() + "." + i,
                                    newFile.key()
                            ),
                            getTree().getNewNodeId()
                    );
                } catch (AlreadyExistsException aex) {
                    i++;
                    continue;
                }
                break;
            } while (true);
        }

        var curKnownRemoteVersion = oursCurMeta.knownRemoteVersions().get(from);
        var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();

        if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
            oursCurMeta = oursCurMeta.withKnownRemoteVersions(oursCurMeta.knownRemoteVersions().plus(from, receivedTotalVer));
            curTx.put(oursCurMeta);
        }
    }

    @Override
    public void handleRemoteUpdate(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
                                   @Nullable FileDto receivedData) {
        var current = curTx.get(RemoteObjectMeta.class, key).orElse(null);
        if (current == null) {
            current = new RemoteObjectMeta(key, HashTreePMap.empty());
            curTx.put(current);
        }

        var changelogCompare = SyncHelper.compareChangelogs(current.changelog(), receivedChangelog);

        switch (changelogCompare) {
            case EQUAL -> {
                Log.debug("No action on update: " + key + " from " + from);
                if (!current.hasLocalData() && receivedData != null) {
                    current = current.withHaveLocal(true);
                    curTx.put(current);
                    curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
                            .map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));

                    if (!current.knownType().isAssignableFrom(File.class))
                        throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);

                    if (!current.knownType().equals(File.class))
                        current = current.withKnownType(File.class);

                    curTx.put(current);

                    fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
                }
            }
            case NEWER -> {
                Log.debug("Received newer index update than known: " + key + " from " + from);
                var newChangelog = receivedChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ?
                        receivedChangelog : receivedChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L);
                current = current.withChangelog(newChangelog);

                if (receivedData != null) {
                    current = current.withHaveLocal(true);
                    curTx.put(current);
                    curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
                            .map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));

                    if (!current.knownType().isAssignableFrom(File.class))
                        throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);

                    if (!current.knownType().equals(File.class))
                        current = current.withKnownType(File.class);

                    curTx.put(current);

                    fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
                } else {
                    current = current.withHaveLocal(false);
                    curTx.put(current);
                }
            }
            case OLDER -> {
                Log.debug("Received older index update than known: " + key + " from " + from);
                return;
            }
            case CONFLICT -> {
                Log.debug("Conflict on update (inconsistent version): " + key + " from " + from);
                assert receivedData != null;
                resolveConflict(from, key, receivedChangelog, receivedData);
                // TODO:
                return;
            }
        }
        var curKnownRemoteVersion = current.knownRemoteVersions().get(from);
        var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();

        if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
            current = current.withKnownRemoteVersions(current.knownRemoteVersions().plus(from, receivedTotalVer));
            curTx.put(current);
        }

    }
}
@@ -1,22 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;

import java.util.Collection;
import java.util.List;

/**
 * JKleppmannTreeNodeMetaDirectory is a record that represents a directory in the JKleppmann tree.
 * @param name the name of the directory
 */
public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
    public JKleppmannTreeNodeMeta withName(String name) {
        return new JKleppmannTreeNodeMetaDirectory(name);
    }

    @Override
    public Collection<JObjectKey> collectRefsTo() {
        return List.of();
    }
}
@@ -1,24 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;

import java.util.Collection;
import java.util.List;

/**
 * JKleppmannTreeNodeMetaFile is a record that represents a file in the JKleppmann tree.
 * @param name    the name of the file
 * @param fileIno a reference to the `File` object
 */
public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
    @Override
    public JKleppmannTreeNodeMeta withName(String name) {
        return new JKleppmannTreeNodeMetaFile(name, fileIno);
    }

    @Override
    public Collection<JObjectKey> collectRefsTo() {
        return List.of(fileIno);
    }
}
@@ -1,13 +0,0 @@
package com.usatiuk.dhfsfs.service;

/**
 * DirectoryNotEmptyException is thrown when a directory is not empty.
 * This exception is used to indicate that a directory cannot be deleted
 * because it contains files or subdirectories.
 */
public class DirectoryNotEmptyException extends RuntimeException {
    @Override
    public synchronized Throwable fillInStackTrace() {
        return this;
    }
}
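The fillInStackTrace() override returns this without capturing a stack trace, which keeps throwing this exception cheap: it is used purely for control flow on the directory-removal path. A hedged sketch of a caller (the service call and errno mapping are assumptions):

    try {
        fileService.rmdir(path);                  // hypothetical service call
    } catch (DirectoryNotEmptyException e) {
        return -ErrorCodes.ENOTEMPTY();           // assumed mapping to ENOTEMPTY at the FUSE layer
    }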
@@ -1,11 +0,0 @@
package com.usatiuk.dhfsfs.service;

/**
 * GetattrRes is a record that represents the result of a getattr operation.
 * @param mtime File modification time
 * @param ctime File creation time
 * @param mode  File mode
 * @param type  File type
 */
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}
@@ -1,40 +0,0 @@
package com.usatiuk.dhfsfs;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;

@ApplicationScoped
public class TestDataCleaner {
    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
    String tempDirectory;

    void init(@Observes @Priority(1) StartupEvent event) throws IOException {
        try {
            purgeDirectory(Path.of(tempDirectory).toFile());
        } catch (Exception ignored) {
            Log.warn("Couldn't cleanup test data on init");
        }
    }

    void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
        purgeDirectory(Path.of(tempDirectory).toFile());
    }

    public void purgeDirectory(File dir) {
        for (File file : Objects.requireNonNull(dir.listFiles())) {
            if (file.isDirectory())
                purgeDirectory(file);
            file.delete();
        }
    }
}
@@ -1,24 +0,0 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.objects.autosync.threads=8
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=8
dhfs.objects.ref-processor.threads=8
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
@@ -1,29 +0,0 @@
package com.usatiuk.dhfsfuse;

import io.quarkus.test.junit.QuarkusTestProfile;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

abstract public class TempDataProfile implements QuarkusTestProfile {
    protected void getConfigOverrides(Map<String, String> toPut) {
    }

    @Override
    final public Map<String, String> getConfigOverrides() {
        Path tempDirWithPrefix;
        try {
            tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        var ret = new HashMap<String, String>();
        ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
        ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
        getConfigOverrides(ret);
        return ret;
    }
}
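A sketch of a concrete profile, assuming the standard Quarkus @TestProfile wiring; the overridden key appears in the application.properties above:

    // Hedged sketch: a subclass layering one override on top of the temp directories.
    public class NoFuseProfile extends TempDataProfile {
        @Override
        protected void getConfigOverrides(Map<String, String> toPut) {
            toPut.put("dhfs.fuse.enabled", "false");
        }
    }

    @QuarkusTest
    @TestProfile(NoFuseProfile.class)
    class SomeFsOnlyTest { /* ... */ }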
|
||||
@@ -1,490 +0,0 @@
|
||||
package com.usatiuk.dhfsfuse.integration;
|
||||
|
||||
import com.github.dockerjava.api.model.Device;
|
||||
import io.quarkus.logging.Log;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
import org.junit.jupiter.api.*;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.testcontainers.DockerClientFactory;
|
||||
import org.testcontainers.containers.GenericContainer;
|
||||
import org.testcontainers.containers.Network;
|
||||
import org.testcontainers.containers.output.Slf4jLogConsumer;
|
||||
import org.testcontainers.containers.output.WaitingConsumer;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static org.awaitility.Awaitility.await;
|
||||
|
||||
public class DhfsFuseIT {
|
||||
GenericContainer<?> container1;
|
||||
GenericContainer<?> container2;
|
||||
|
||||
WaitingConsumer waitingConsumer1;
|
||||
WaitingConsumer waitingConsumer2;
|
||||
|
||||
String c1uuid;
|
||||
String c2uuid;
|
||||
|
||||
Network network;
|
||||
|
||||
@BeforeEach
|
||||
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
|
||||
network = Network.newNetwork();
|
||||
container1 = new GenericContainer<>(DhfsImage.getInstance())
|
||||
.withPrivilegedMode(true)
|
||||
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
|
||||
.withNetwork(network);
|
||||
container2 = new GenericContainer<>(DhfsImage.getInstance())
|
||||
.withPrivilegedMode(true)
|
||||
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
|
||||
.withNetwork(network);
|
||||
|
||||
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
|
||||
|
||||
waitingConsumer1 = new WaitingConsumer();
|
||||
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("1-" + testInfo.getDisplayName());
|
||||
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
|
||||
|
||||
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
|
||||
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
|
||||
|
||||
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
|
||||
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
|
||||
|
||||
var c1curl = container1.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
|
||||
|
||||
var c2curl = container2.execInContainer("/bin/sh", "-c",
|
||||
"curl --header \"Content-Type: application/json\" " +
|
||||
" --request PUT " +
|
||||
" --data '{}' " +
|
||||
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
|
||||
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
private void checkConsistency() {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> {
|
||||
Log.info("Listing consistency");
|
||||
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
|
||||
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
|
||||
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
|
||||
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
|
||||
Log.info(ls1);
|
||||
Log.info(cat1);
|
||||
Log.info(ls2);
|
||||
Log.info(cat2);
|
||||
|
||||
return ls1.equals(ls2) && cat1.equals(cat2);
|
||||
});
|
||||
}
|
||||
|
||||
@AfterEach
|
||||
void stop() {
|
||||
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
|
||||
network.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void readWriteRewriteFileTest() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void createDelayedTest() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /dhfs_test/fuse/testf2").getExitCode());
|
||||
|
||||
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void writeRewriteDelayedTest() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.disconnectFromNetworkCmd().withNetworkId(network.getId()).withContainerId(container2.getContainerId()).exec();
|
||||
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
|
||||
|
||||
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void deleteTest() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
|
||||
Log.info("Deleting");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
|
||||
Log.info("Deleted");
|
||||
|
||||
// FIXME?
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
|
||||
}
|
||||
|
||||
@Test
|
||||
void deleteTestKickedOut() throws IOException, InterruptedException, TimeoutException {
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.disconnectFromNetworkCmd().withNetworkId(network.getId()).withContainerId(container2.getContainerId()).exec();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("kicked"), 60, TimeUnit.SECONDS, 1);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty2 > /dhfs_test/fuse/testf2").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo tesempty3 > /dhfs_test/fuse/testf3").getExitCode());
|
||||
|
||||
Log.info("Deleting");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
|
||||
Log.info("Deleted");
|
||||
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
|
||||
|
||||
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
|
||||
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty2\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty3\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf3").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty2\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() ->
|
||||
"tesempty3\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf3").getStdout()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void moveFileTest() throws IOException, InterruptedException, TimeoutException {
|
||||
Log.info("Creating");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
|
||||
Log.info("Listing");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
|
||||
Log.info("Moving");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testf1 /dhfs_test/fuse/testf2").getExitCode());
|
||||
Log.info("Listing");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
|
||||
Log.info("Reading");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void moveDirTest() throws IOException, InterruptedException, TimeoutException {
|
||||
Log.info("Creating");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testdir").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testdir/testf1").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testdir/testf1").getStdout()));
|
||||
Log.info("Listing");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
|
||||
Log.info("Moving");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testdir2").getExitCode());
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testdir /dhfs_test/fuse/testdir2/testdirm").getExitCode());
|
||||
Log.info("Listing");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
|
||||
Log.info("Reading");
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testdir2/testdirm/testf1").getStdout()));
|
||||
}
|
||||
|
||||
|
||||
    // TODO: This probably shouldn't be working right now
    @Test
    void removeAddHostTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        var c2curl = container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request DELETE " +
                        " --data '{}' " +
                        " http://localhost:8080/peers-manage/known-peers/" + c1uuid);

        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /dhfs_test/fuse/newfile1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /dhfs_test/fuse/newfile1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /dhfs_test/fuse/newfile2").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /dhfs_test/fuse/newfile2").getExitCode());

        Log.info("Re-adding");
        container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{}' " +
                        " http://localhost:8080/peers-manage/known-peers/" + c1uuid);
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            Log.info("Listing removeAddHostTest");
            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
            Log.info(cat1);
            Log.info(cat2);
            Log.info(ls1);
            Log.info(ls2);

            return cat1.getStdout().contains("jioadsd") && cat1.getStdout().contains("asvdkljm") && cat1.getStdout().contains("dfgvh") && cat1.getStdout().contains("dscfg")
                    && cat2.getStdout().contains("jioadsd") && cat2.getStdout().contains("asvdkljm") && cat2.getStdout().contains("dfgvh") && cat2.getStdout().contains("dscfg");
        });
    }
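
    // dirConflictTest: both peers append to the same (previously absent) file at the same time while
    // connected; after sync, both writes must remain readable somewhere under /dhfs_test/fuse.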
    @Test
    void dirConflictTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
        boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /dhfs_test/fuse/testf"),
                Pair.of(container2, "echo test2 >> /dhfs_test/fuse/testf")).parallel().map(p -> {
            try {
                return p.getLeft().execInContainer("/bin/sh", "-c", p.getRight()).getExitCode();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }).anyMatch(r -> r != 0);
        Assumptions.assumeTrue(!createFail, "Failed creating one or more files");
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            var ls = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
            var cat = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            Log.info(ls);
            Log.info(cat);
            return cat.getStdout().contains("test1") && cat.getStdout().contains("test2");
        });
    }
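
    // dirConflictTest2: the same directory 'a' and file are created on both peers while the network is
    // partitioned; after reconnection checkConsistency() must converge, and both file contents must
    // survive, possibly under conflict-renamed directories (hence the 'a*/*' glob below).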
    @Test
    void dirConflictTest2() throws IOException, InterruptedException, TimeoutException {
        var client = DockerClientFactory.instance().client();
        client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
        client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);

        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo fdsaio >> /dhfs_test/fuse/a/testf").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo exgrg >> /dhfs_test/fuse/a/testf").getExitCode());

        client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
        client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();

        Log.warn("Waiting for connections");
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
        Log.warn("Connected");

        checkConsistency();

        var ls1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/a*/*");
        Assertions.assertTrue(ls1.getStdout().contains("fdsaio"));
        Assertions.assertTrue(ls1.getStdout().contains("exgrg"));
    }
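
    // dirCycleTest: concurrent moves of 'a' into 'b' and 'b' into 'a' during a partition would, if
    // applied naively, form a directory cycle; after the peers reconnect, the tree must remain acyclic
    // with both test files still reachable on both peers.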
    @Test
    void dirCycleTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/b").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /dhfs_test/fuse/a/testfa").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /dhfs_test/fuse/b/testfb").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /dhfs_test/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            var c2ls = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f -exec cat {} \\;");
            return c2ls.getExitCode() == 0 && c2ls.getStdout().contains("xqr489") && c2ls.getStdout().contains("ahinou");
        });

        var client = DockerClientFactory.instance().client();
        client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
        client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/a /dhfs_test/fuse/b").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/b /dhfs_test/fuse/a").getExitCode());
        client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
        client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();

        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            Log.info("Listing dirCycleTest");
            Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse"));
            Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/a"));
            Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/b"));
            Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse"));
            Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/a"));
            Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/b"));

            var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -maxdepth 3 -type f -exec cat {} \\;");
            Log.info(c1ls2);
            var c2ls2 = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -maxdepth 3 -type f -exec cat {} \\;");
            Log.info(c2ls2);

            return c1ls2.getStdout().contains("xqr489") && c1ls2.getStdout().contains("ahinou")
                    && c2ls2.getStdout().contains("xqr489") && c2ls2.getStdout().contains("ahinou")
                    && c1ls2.getExitCode() == 0 && c2ls2.getExitCode() == 0;
        });
    }
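
    // removeAndMove: while partitioned, one peer deletes testf1 and the other renames it to testf2;
    // after the merge, both peers must agree on exactly one outcome — the file is gone, or it lives on
    // as testf2 with its original content.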
    @Test
    void removeAndMove() throws IOException, InterruptedException, TimeoutException {
        var client = DockerClientFactory.instance().client();
        Log.info("Creating");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        Log.info("Listing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
        client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);

        Log.info("Removing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
        Log.info("Moving");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testf1 /dhfs_test/fuse/testf2").getExitCode());
        Log.info("Listing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
        Log.info("Reading");
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));

        client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
        client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);

        // Either removed, or moved
        Log.info("Reading both");
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            Log.info("cat1: " + cat1);
            Log.info("cat2: " + cat2);
            Log.info("ls1: " + ls1);
            Log.info("ls2: " + ls2);

            if (!ls1.getStdout().equals(ls2.getStdout())) {
                Log.info("Different ls?");
                return false;
            }

            if (ls1.getStdout().trim().isEmpty() && ls2.getStdout().trim().isEmpty()) {
                Log.info("Both empty");
                return true;
            }

            if (!cat1.getStdout().equals(cat2.getStdout())) {
                Log.info("Different cat?");
                return false;
            }

            if (!(cat1.getExitCode() == 0 && cat2.getExitCode() == 0 && ls1.getExitCode() == 0 && ls2.getExitCode() == 0)) {
                return false;
            }

            boolean hasMoved = cat1.getStdout().contains("tesempty") && cat2.getStdout().contains("tesempty")
                    && ls1.getStdout().contains("testf2") && !ls1.getStdout().contains("testf1")
                    && ls2.getStdout().contains("testf2") && !ls2.getStdout().contains("testf1");

            boolean removed = !cat1.getStdout().contains("tesempty") && !cat2.getStdout().contains("tesempty")
                    && !ls1.getStdout().contains("testf2") && !ls1.getStdout().contains("testf1")
                    && !ls2.getStdout().contains("testf2") && !ls2.getStdout().contains("testf1");

            if (hasMoved && removed) {
                Log.info("Both removed and moved");
                return false;
            }

            return hasMoved || removed;
        });
    }

}
@@ -1,238 +0,0 @@
package com.usatiuk.dhfsfuse.integration;

import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;

import static org.awaitility.Awaitility.await;

public class KillIT {
    GenericContainer<?> container1;
    GenericContainer<?> container2;

    WaitingConsumer waitingConsumer1;
    WaitingConsumer waitingConsumer2;

    String c1uuid;
    String c2uuid;

    File data1;
    File data2;

    Network network;

    ExecutorService executor;

    @BeforeEach
    void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
        executor = Executors.newCachedThreadPool();

        data1 = Files.createTempDirectory("").toFile();
        data2 = Files.createTempDirectory("").toFile();

        network = Network.newNetwork();

        container1 = new GenericContainer<>(DhfsImage.getInstance())
                .withPrivilegedMode(true)
                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
                .withNetwork(network)
                .withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
        container2 = new GenericContainer<>(DhfsImage.getInstance())
                .withPrivilegedMode(true)
                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
                .withNetwork(network)
                .withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");

        Stream.of(container1, container2).parallel().forEach(GenericContainer::start);

        waitingConsumer1 = new WaitingConsumer();
        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        waitingConsumer2 = new WaitingConsumer();
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);

        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();

        Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
        Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);

        var c1curl = container1.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{}' " +
                        " http://localhost:8080/peers-manage/known-peers/" + c2uuid);

        var c2curl = container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{}' " +
                        " http://localhost:8080/peers-manage/known-peers/" + c1uuid);

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
    }

    @AfterEach
    void stop() {
        Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
        TestDataCleaner.purgeDirectory(data1);
        TestDataCleaner.purgeDirectory(data2);
        executor.close();
        network.close();
    }

    private void checkConsistency() {
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            Log.info("Listing consistency");
            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            Log.info(ls1);
            Log.info(cat1);
            Log.info(ls2);
            Log.info(cat2);

            return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
        });
    }
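
    // The four kill tests below all follow the same pattern: a background shell loop appends counter
    // values to the FUSE mount, one container is hard-killed mid-write and restarted, and afterwards
    // both peers must converge to identical listings and contents via checkConsistency().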
    @Test
    void killTest(TestInfo testInfo) throws Exception {
        var barrier = new CyclicBarrier(2);
        var ret1 = executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.await();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(10000);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container1.getContainerId()).exec();
        container1.stop();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        container1.start();
        waitingConsumer1 = new WaitingConsumer();
        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency();
    }

    @Test
    void killTestDirs(TestInfo testInfo) throws Exception {
        var barrier = new CyclicBarrier(2);
        var ret1 = executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.await();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(10000);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container1.getContainerId()).exec();
        container1.stop();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        container1.start();
        waitingConsumer1 = new WaitingConsumer();
        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency();
    }

    @Test
    void killTest2(TestInfo testInfo) throws Exception {
        var barrier = new CyclicBarrier(2);
        var ret1 = executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.await();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(10000);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container2.getContainerId()).exec();
        container2.stop();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
        container2.start();
        waitingConsumer2 = new WaitingConsumer();
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency();
    }

    @Test
    void killTestDirs2(TestInfo testInfo) throws Exception {
        var barrier = new CyclicBarrier(2);
        var ret1 = executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.await();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(10000);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container2.getContainerId()).exec();
        container2.stop();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
        container2.start();
        waitingConsumer2 = new WaitingConsumer();
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency();
    }
}
@@ -1,215 +0,0 @@
package com.usatiuk.dhfsfuse.integration;

import io.quarkus.logging.Log;

import java.io.*;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

public class LazyFs {
    private static final String lazyFsPath;

    static {
        lazyFsPath = System.getProperty("lazyFsPath");
        System.out.println("LazyFs Path: " + lazyFsPath);
    }
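
    // Note: lazyFsPath must point at a LazyFS checkout whose binary sits at <lazyFsPath>/build/lazyfs
    // (see start() below); it is presumably passed to the test JVM as a system property, e.g.
    // -DlazyFsPath=thirdparty/lazyfs.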

    private final String mountRoot;
    private final String dataRoot;
    private final String name;
    private final File configFile;
    private final File fifoFile;
    private Thread errPiper;
    private Thread outPiper;
    private CountDownLatch startLatch;
    private Process fs;

    public LazyFs(String name, String mountRoot, String dataRoot) {
        this.name = name;
        this.mountRoot = mountRoot;
        this.dataRoot = dataRoot;

        try {
            configFile = File.createTempFile("lazyfs", ".conf");
            configFile.deleteOnExit();

            fifoFile = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
            fifoFile.deleteOnExit();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
    }

    private String fifoPath() {
        return fifoFile.getAbsolutePath();
    }

    public void start(String extraOpts) {
        var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
        if (!lfsPath.toFile().isFile())
            throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
        if (!lfsPath.toFile().canExecute())
            throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());

        try (var rwFile = new RandomAccessFile(configFile, "rw");
             var channel = rwFile.getChannel()) {
            channel.truncate(0);
            var config = "[faults]\n" +
                    "fifo_path=\"" + fifoPath() + "\"\n" +
                    "[cache]\n" +
                    "apply_eviction=false\n" +
                    "[cache.simple]\n" +
                    "custom_size=\"1gb\"\n" +
                    "blocks_per_page=1\n" +
                    "[filesystem]\n" +
                    "log_all_operations=false\n" +
                    "logfile=\"\"\n" + extraOpts;
            rwFile.write(config.getBytes());
            Log.info("LazyFs config: \n" + config);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        var argList = new ArrayList<String>();

        argList.add(lfsPath.toString());
        argList.add(Path.of(mountRoot).toString());
        argList.add("--config-path");
        argList.add(configFile.getAbsolutePath());
        argList.add("-o");
        argList.add("allow_other");
        argList.add("-o");
        argList.add("modules=subdir");
        argList.add("-o");
        argList.add("subdir=" + Path.of(dataRoot).toAbsolutePath().toString());
        try {
            Log.info("Starting LazyFs " + argList);
            fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        startLatch = new CountDownLatch(1);

        outPiper = new Thread(() -> {
            try {
                try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
                    String line;

                    while ((line = input.readLine()) != null) {
                        if (line.contains("running LazyFS"))
                            startLatch.countDown();
                        System.out.println(line);
                    }
                }
            } catch (Exception e) {
                Log.info("Exception in LazyFs piper", e);
            }
            Log.info("LazyFs out piper finished");
        });
        outPiper.start();
        errPiper = new Thread(() -> {
            try {
                try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
                    String line;

                    while ((line = input.readLine()) != null) {
                        System.out.println(line);
                    }
                }
            } catch (Exception e) {
                Log.info("Exception in LazyFs piper", e);
            }
            Log.info("LazyFs err piper finished");
        });
        errPiper.start();

        try {
            if (!startLatch.await(30, TimeUnit.SECONDS))
                throw new RuntimeException("StartLatch timed out");
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        Log.info("LazyFs started");
    }

    public void start() {
        start("");
    }

    private String mdbPath() {
        return Path.of(dataRoot).resolve("objects").resolve("data.mdb").toAbsolutePath().toString();
    }

    public void startTornOp() {
        start("[[injection]]\n" +
                "type=\"torn-op\"\n" +
                "file=\"" + mdbPath() + "\"\n" +
                "occurrence=3\n" +
                "parts=3 #or parts_bytes=[4096,3600,1260]\n" +
                "persist=[1,3]");
    }

    public void startTornSeq() {
        start("\n" +
                "[[injection]]\n" +
                "type=\"torn-seq\"\n" +
                "op=\"write\"\n" +
                "file=\"" + mdbPath() + "\"\n" +
                "persist=[1,4]\n" +
                "occurrence=3");
    }
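
    // In LazyFS terms: a torn-seq fault persists only a subset ('persist') of a sequence of writes to
    // the target file, while a torn-op fault splits a single write into 'parts' and persists only some
    // of them. Both simulate a crash that leaves LMDB's data.mdb partially written.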

    public void crash() {
        try {
            var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
            Log.info("Running command: " + cmd);
            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd}).waitFor();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public void stop() {
        try {
            synchronized (this) {
                Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + mountRoot}).waitFor();
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    // Doesn't actually work?
    //
    // public void crashop() {
    //     try {
    //         var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
    //         System.out.println("Running command: " + cmd);
    //         Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
    //         Thread.sleep(1000);
    //         Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
    //         Thread.sleep(1000);
    //     } catch (Exception e) {
    //         throw new RuntimeException(e);
    //     }
    // }
    //
    // public void crashseq() {
    //     try {
    //         var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
    //         System.out.println("Running command: " + cmd);
    //         Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
    //         Thread.sleep(1000);
    //         Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
    //         Thread.sleep(1000);
    //     } catch (Exception e) {
    //         throw new RuntimeException(e);
    //     }
    // }
}
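
// A minimal usage sketch (directory names here are hypothetical, not taken from the tests):
//
//     LazyFs fs = new LazyFs("demo", "/tmp/demo-mount", "/tmp/demo-data");
//     fs.start();   // mount /tmp/demo-data at /tmp/demo-mount through LazyFS
//     fs.crash();   // inject a crash after the next write, via the faults fifo
//     fs.stop();    // fusermount3 -u /tmp/demo-mount
//
// startTornOp()/startTornSeq() replace start() when a torn-write fault should apply after the remount.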

@@ -1,490 +0,0 @@
package com.usatiuk.dhfsfuse.integration;

import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;

import static org.awaitility.Awaitility.await;

public class LazyFsIT {
    GenericContainer<?> container1;
    GenericContainer<?> container2;

    WaitingConsumer waitingConsumer1;
    WaitingConsumer waitingConsumer2;

    String c1uuid;
    String c2uuid;

    File data1;
    File data2;
    File data1Lazy;
    File data2Lazy;

    LazyFs lazyFs1;
    LazyFs lazyFs2;

    ExecutorService executor;
    Network network;

    @BeforeEach
    void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
        executor = Executors.newCachedThreadPool();
        data1 = Files.createTempDirectory("dhfsdata").toFile();
        data2 = Files.createTempDirectory("dhfsdata").toFile();
        data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
        data2Lazy = Files.createTempDirectory("lazyfsroot").toFile();

        network = Network.newNetwork();

        lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
        lazyFs1.start();
        lazyFs2 = new LazyFs(testInfo.getDisplayName(), data2.toString(), data2Lazy.toString());
        lazyFs2.start();

        container1 = new GenericContainer<>(DhfsImage.getInstance())
                .withPrivilegedMode(true)
                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
                .withNetwork(network)
                .withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
        container2 = new GenericContainer<>(DhfsImage.getInstance())
                .withPrivilegedMode(true)
                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
                .withNetwork(network)
                .withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");

        Stream.of(container1, container2).parallel().forEach(GenericContainer::start);

        waitingConsumer1 = new WaitingConsumer();
        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        waitingConsumer2 = new WaitingConsumer();
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);

        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();

        Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
        Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);

        var c1curl = container1.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{}' " +
                        " http://localhost:8080/peers-manage/known-peers/" + c2uuid);

        var c2curl = container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{}' " +
                        " http://localhost:8080/peers-manage/known-peers/" + c1uuid);

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
    }

    @AfterEach
    void stop() {
        lazyFs1.stop();
        lazyFs2.stop();

        Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
        TestDataCleaner.purgeDirectory(data1);
        TestDataCleaner.purgeDirectory(data1Lazy);
        TestDataCleaner.purgeDirectory(data2);
        TestDataCleaner.purgeDirectory(data2Lazy);

        executor.close();
        network.close();
    }

    private void checkConsistency(String testName) {
        await().atMost(120, TimeUnit.SECONDS).until(() -> {
            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            Log.info("Listing consistency " + testName + "\n"
                    + ls1 + "\n"
                    + cat1 + "\n"
                    + ls2 + "\n"
                    + cat2 + "\n");

            return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
        });
    }

    @ParameterizedTest
    @EnumSource(CrashType.class)
    void killTest(CrashType crashType, TestInfo testInfo) throws Exception {
        var barrier = new CountDownLatch(1);
        executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.countDown();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(3000);
        Log.info("Killing");
        lazyFs1.crash();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container1.getContainerId()).exec();
        container1.stop();
        lazyFs1.stop();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        Log.info("Restart");
        switch (crashType) {
            case CRASH -> lazyFs1.start();
            case TORN_OP -> lazyFs1.startTornOp();
            case TORN_SEQ -> lazyFs1.startTornSeq();
        }
        container1.start();

        waitingConsumer1 = new WaitingConsumer();
        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        try {
            waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
            waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            Log.info("Failed to connect: " + testInfo.getDisplayName());
            // Sometimes it doesn't get mounted properly for some reason
            Assumptions.assumeTrue(false);
        }

        executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.countDown();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        Log.info("Killing");
        if (crashType.equals(CrashType.CRASH)) {
            Thread.sleep(3000);
            lazyFs1.crash();
        }
        try {
            waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            // Sometimes crash doesn't work
            Log.info("Failed to crash: " + testInfo.getDisplayName());
            if (crashType.equals(CrashType.CRASH))
                throw e;
            Assumptions.assumeTrue(false);
        }
        client.killContainerCmd(container1.getContainerId()).exec();
        container1.stop();
        lazyFs1.stop();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        Log.info("Restart");
        lazyFs1.start();
        container1.start();

        waitingConsumer1 = new WaitingConsumer();
        loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency(testInfo.getDisplayName());
    }

    @ParameterizedTest
    @EnumSource(CrashType.class)
    void killTestDirs(CrashType crashType, TestInfo testInfo) throws Exception {
        var barrier = new CountDownLatch(1);
        executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.countDown();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(3000);
        Log.info("Killing");
        lazyFs1.crash();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container1.getContainerId()).exec();
        container1.stop();
        lazyFs1.stop();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        Log.info("Restart");
        switch (crashType) {
            case CRASH -> lazyFs1.start();
            case TORN_OP -> lazyFs1.startTornOp();
            case TORN_SEQ -> lazyFs1.startTornSeq();
        }
        container1.start();

        waitingConsumer1 = new WaitingConsumer();
        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        try {
            waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
            waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            Log.info("Failed to connect: " + testInfo.getDisplayName());
            // Sometimes it doesn't get mounted properly for some reason

            Assumptions.assumeTrue(false);
        }

        executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.countDown();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        Log.info("Killing");
        if (crashType.equals(CrashType.CRASH)) {
            Thread.sleep(3000);
            lazyFs1.crash();
        }
        try {
            waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            // Sometimes crash doesn't work
            Log.info("Failed to crash: " + testInfo.getDisplayName());
            if (crashType.equals(CrashType.CRASH))
                throw e;
            Assumptions.assumeTrue(false);
        }
        client.killContainerCmd(container1.getContainerId()).exec();
        container1.stop();
        lazyFs1.stop();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        Log.info("Restart");
        lazyFs1.start();
        container1.start();

        waitingConsumer1 = new WaitingConsumer();
        loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency(testInfo.getDisplayName());
    }

    @ParameterizedTest
    @EnumSource(CrashType.class)
    void killTest2(CrashType crashType, TestInfo testInfo) throws Exception {
        var barrier = new CountDownLatch(1);
        executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.countDown();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(3000);
        Log.info("Killing");
        lazyFs2.crash();
        container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container2.getContainerId()).exec();
        container2.stop();
        lazyFs2.stop();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        Log.info("Restart");
        switch (crashType) {
            case CRASH -> lazyFs2.start();
            case TORN_OP -> lazyFs2.startTornOp();
            case TORN_SEQ -> lazyFs2.startTornSeq();
        }
        container2.start();

        waitingConsumer2 = new WaitingConsumer();
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
        try {
            waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
            waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            Log.info("Failed to connect: " + testInfo.getDisplayName());
            // Sometimes it doesn't get mounted properly for some reason

            Assumptions.assumeTrue(false);
        }
        var barrier2 = new CountDownLatch(1);
        executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier2.countDown();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier2.await();
        Log.info("Killing");
        Thread.sleep(3000);
        if (crashType.equals(CrashType.CRASH)) {
            lazyFs2.crash();
        }
        container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
        try {
            waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            // Sometimes crash doesn't work
            Log.info("Failed to crash: " + testInfo.getDisplayName());
            if (crashType.equals(CrashType.CRASH))
                throw e;
            Assumptions.assumeTrue(false);
        }
        client.killContainerCmd(container2.getContainerId()).exec();
        container2.stop();
        lazyFs2.stop();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        Log.info("Restart");
        lazyFs2.start();
        container2.start();

        waitingConsumer2 = new WaitingConsumer();
        loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency(testInfo.getDisplayName());
    }

    @ParameterizedTest
    @EnumSource(CrashType.class)
    void killTestDirs2(CrashType crashType, TestInfo testInfo) throws Exception {
        var barrier = new CountDownLatch(1);
        executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.countDown();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(3000);
        Log.info("Killing");
        lazyFs2.crash();
        container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container2.getContainerId()).exec();
        container2.stop();
        lazyFs2.stop();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        Log.info("Restart");
        switch (crashType) {
            case CRASH -> lazyFs2.start();
            case TORN_OP -> lazyFs2.startTornOp();
            case TORN_SEQ -> lazyFs2.startTornSeq();
        }
        container2.start();

        waitingConsumer2 = new WaitingConsumer();
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
        try {
            waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
            waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            Log.info("Failed to connect: " + testInfo.getDisplayName());
            // Sometimes it doesn't get mounted properly for some reason
            Assumptions.assumeTrue(false);
        }

        var barrier2 = new CountDownLatch(1);
        executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier2.countDown();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier2.await();
        Thread.sleep(3000);
        Log.info("Killing");
        if (crashType.equals(CrashType.CRASH)) {
            lazyFs2.crash();
        }
        container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
        try {
            waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            // Sometimes crash doesn't work
            Log.info("Failed to crash: " + testInfo.getDisplayName());
            if (crashType.equals(CrashType.CRASH))
                throw e;
            Assumptions.assumeTrue(false);
        }
        client.killContainerCmd(container2.getContainerId()).exec();
        container2.stop();
        lazyFs2.stop();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        Log.info("Restart");
        lazyFs2.start();
        container2.start();

        waitingConsumer2 = new WaitingConsumer();
        loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency(testInfo.getDisplayName());
    }

    private enum CrashType {
        CRASH,
        TORN_OP,
        TORN_SEQ
    }

}
@@ -1,11 +0,0 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true
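
# Test profile in short: all state lives under $HOME/dhfs_data, deletions are not delayed
# (deletion.delay=0), extra reference/snapshot verification is enabled, LAN discovery is off so only
# explicitly added peers connect, and the test HTTP(S) ports are randomized (test-port=0).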
@@ -18,11 +18,6 @@
            <artifactId>junit-jupiter-engine</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
@@ -35,9 +30,5 @@
            <groupId>org.pcollections</groupId>
            <artifactId>pcollections</artifactId>
        </dependency>
        <dependency>
            <groupId>jakarta.annotation</groupId>
            <artifactId>jakarta.annotation-api</artifactId>
        </dependency>
    </dependencies>
</project>
@@ -1,8 +1,5 @@
package com.usatiuk.kleppmanntree;

/**
 * Exception thrown when an attempt is made to create a new tree node as a child with a name that already exists.
 */
public class AlreadyExistsException extends RuntimeException {
    public AlreadyExistsException(String message) {
        super(message);

@@ -0,0 +1,32 @@
package com.usatiuk.kleppmanntree;

import java.io.Serializable;

public class AtomicClock implements Clock<Long>, Serializable {
    private long _max = 0;

    public AtomicClock(long counter) {
        _max = counter;
    }

    @Override
    public Long getTimestamp() {
        return ++_max;
    }

    public void setTimestamp(Long timestamp) {
        _max = timestamp;
    }

    @Override
    public Long peekTimestamp() {
        return _max;
    }

    @Override
    public Long updateTimestamp(Long receivedTimestamp) {
        var old = _max;
        _max = Math.max(_max, receivedTimestamp) + 1;
        return old;
    }
}
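
// A quick sketch of the Lamport-clock behaviour above (illustration only, not part of the source tree):
//
//     var clock = new AtomicClock(0);
//     clock.getTimestamp();     // -> 1, a local event increments and returns
//     clock.peekTimestamp();    // -> 1, peeking does not advance the clock
//     clock.updateTimestamp(10L); // remote timestamp observed, returns the old value 1
//     clock.peekTimestamp();    // -> 11, i.e. max(1, 10) + 1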

@@ -1,26 +1,9 @@
package com.usatiuk.kleppmanntree;

/**
 * A logical clock interface, used to order tree operations.
 */
public interface Clock<TimestampT extends Comparable<TimestampT>> {
    /**
     * Increment and get the current timestamp.
     * @return the incremented timestamp
     */
    TimestampT getTimestamp();

    /**
     * Get the current timestamp without incrementing it.
     * @return the current timestamp
     */
    TimestampT peekTimestamp();

    /**
     * Update the timestamp with an externally received timestamp.
     * Will set the currently stored timestamp to <code>max(receivedTimestamp, currentTimestamp) + 1</code>
     * @param receivedTimestamp the received timestamp
     * @return the previous timestamp
     */
    TimestampT updateTimestamp(TimestampT receivedTimestamp);
}

@@ -3,13 +3,6 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
import java.util.Comparator;

/**
 * CombinedTimestamp is a record that represents a timestamp and a node ID, ordered first by timestamp and then by node ID.
 * @param timestamp the timestamp
 * @param nodeId the node ID. If null, then only the timestamp is used for ordering.
 * @param <TimestampT> the type of the timestamp
 * @param <PeerIdT> the type of the node ID
 */
public record CombinedTimestamp<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>>
        (TimestampT timestamp,
         PeerIdT nodeId) implements Comparable<CombinedTimestamp<TimestampT, PeerIdT>>, Serializable {
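
// Per the javadoc above, ordering is lexicographic on (timestamp, nodeId): for example (2, A) < (3, A)
// and (3, A) < (3, B), with node IDs breaking timestamp ties; a null nodeId compares by timestamp only.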
|
||||
|
||||
@@ -8,14 +8,6 @@ import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * An implementation of a tree as described in <a href="https://martin.kleppmann.com/papers/move-op.pdf">A highly-available move operation for replicated trees</a>
 *
 * @param <TimestampT> Type of the timestamp
 * @param <PeerIdT>    Type of the peer ID
 * @param <MetaT>      Type of the node metadata
 * @param <NodeIdT>    Type of the node ID
 */
public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
    private static final Logger LOGGER = Logger.getLogger(KleppmannTree.class.getName());

@@ -23,15 +15,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
    private final PeerInterface<PeerIdT> _peers;
    private final Clock<TimestampT> _clock;
    private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
    private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;

    /**
     * Constructor with all the dependencies
     *
     * @param storage    Storage interface
     * @param peers      Peer interface
     * @param clock      Clock interface
     * @param opRecorder Operation recorder interface
     */
    public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
                         PeerInterface<PeerIdT> peers,
                         Clock<TimestampT> clock,
@@ -42,13 +27,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        _opRecorder = opRecorder;
    }

    /**
     * Traverse the tree from the given node ID using the given list of names
     *
     * @param fromId The starting node ID
     * @param names  The list of names to traverse
     * @return The resulting node ID or null if not found
     */
    private NodeIdT traverseImpl(NodeIdT fromId, List<String> names) {
        if (names.isEmpty()) return fromId;

@@ -62,40 +40,31 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        return traverseImpl(childId, names.subList(1, names.size()));
    }

    /**
     * Traverse the tree from its root node using the given list of names
     *
     * @param names The list of names to traverse
     * @return The resulting node ID or null if not found
     */
    public NodeIdT traverse(NodeIdT fromId, List<String> names) {
        return traverseImpl(fromId, names.subList(1, names.size()));
    }

    public NodeIdT traverse(List<String> names) {
        return traverseImpl(_storage.getRootId(), names);
    }

    /**
     * Undo the effect of a log effect
     *
     * @param effect The log effect to undo
     */
    private void undoEffect(LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT> effect) {
        if (effect.oldInfo() != null) {
            var node = _storage.getById(effect.childId());
            var curParent = _storage.getById(effect.newParentId());
            {
                var newCurParentChildren = curParent.children().minus(node.name());
                var newCurParentChildren = curParent.children().minus(node.meta().getName());
                curParent = curParent.withChildren(newCurParentChildren);
                _storage.putNode(curParent);
            }

            if (effect.oldInfo().oldMeta() != null
                    && node.meta() != null
                    && !node.meta().getClass().equals(effect.oldInfo().oldMeta().getClass()))
            if (!node.meta().getClass().equals(effect.oldInfo().oldMeta().getClass()))
                throw new IllegalArgumentException("Class mismatch for meta for node " + node.key());

            // Needs to be read after changing curParent, as it might be the same node
            var oldParent = _storage.getById(effect.oldInfo().oldParent());
            {
                var newOldParentChildren = oldParent.children().plus(effect.oldName(), node.key());
                var newOldParentChildren = oldParent.children().plus(node.meta().getName(), node.key());
                oldParent = oldParent.withChildren(newOldParentChildren);
                _storage.putNode(oldParent);
            }
@@ -108,7 +77,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
            var node = _storage.getById(effect.childId());
            var curParent = _storage.getById(effect.newParentId());
            {
                var newCurParentChildren = curParent.children().minus(node.name());
                var newCurParentChildren = curParent.children().minus(node.meta().getName());
                curParent = curParent.withChildren(newCurParentChildren);
                _storage.putNode(curParent);
            }
@@ -116,47 +85,26 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
                    node.withParent(null)
                            .withLastEffectiveOp(null)
            );
            _undoCtx.put(node.key(), node);
        }
    }

    /**
     * Undo the effects of a log record
     *
     * @param op The log record to undo
     */
    private void undoOp(LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
        LOGGER.finer(() -> "Will undo op: " + op);
        if (op.effects() != null)
            for (var e : op.effects().reversed())
                undoEffect(e);
    }

    /**
     * Redo the operation in a log record
     *
     * @param entry The log record to redo
     */
    private void redoOp(Map.Entry<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> entry) {
        var newEffects = doOp(entry.getValue().op(), false);
        _storage.getLog().replace(entry.getKey(), newEffects);
    }

    /**
     * Perform the operation and put it in the log
     *
     * @param op                   The operation to perform
     * @param failCreatingIfExists Whether to fail if there is a name conflict,
     *                             otherwise replace the existing node
     * @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
     */
    private void doAndPut(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
        var res = doOp(op, failCreatingIfExists);
        _storage.getLog().put(res.op().timestamp(), res);
    }

    /**
     * Try to trim the log to the causality threshold
     */
    private void tryTrimLog() {
        var log = _storage.getLog();
        var timeLog = _storage.getPeerTimestampLog();
@@ -192,8 +140,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
                    }
                }
            }
        }

        }
        if (!inTrash.isEmpty()) {
            var trash = _storage.getById(_storage.getTrashId());
            for (var n : inTrash) {
@@ -212,55 +160,25 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        }
    }

    /**
     * Move a node to a new parent with new metadata
     *
     * @param newParent The new parent node ID
     * @param newMeta   The new metadata
     * @param child     The child node ID
     * @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
     */
    public <LocalMetaT extends MetaT> void move(NodeIdT newParent, LocalMetaT newMeta, NodeIdT child) {
        move(newParent, newMeta, child, true);
    }

    /**
     * Move a node to a new parent with new metadata
     *
     * @param newParent            The new parent node ID
     * @param newMeta              The new metadata
     * @param child                The child node ID
     * @param failCreatingIfExists Whether to fail if there is a name conflict,
     *                             otherwise replace the existing node
     * @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
     */
    public void move(NodeIdT newParent, MetaT newMeta, NodeIdT child, boolean failCreatingIfExists) {
        var createdMove = createMove(newParent, newMeta, child);
        applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists);
        _opRecorder.recordOp(createdMove);
        applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists);
    }

    /**
     * Apply an external operation from a remote peer
     *
     * @param from The peer ID
     * @param op   The operation to apply
     */
    public void applyExternalOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
        _clock.updateTimestamp(op.timestamp().timestamp());
        applyOp(from, op, false);
    }

    /**
     * Update the causality threshold timestamp for a peer
     *
     * @param from         The peer ID
     * @param newTimestamp The timestamp received from it
     * @return True if the timestamp was updated, false otherwise
     */
    // Returns true if the timestamp is newer than what's seen, false otherwise
    private boolean updateTimestampImpl(PeerIdT from, TimestampT newTimestamp) {
        TimestampT oldRef = _storage.getPeerTimestampLog().getForPeer(from);
        if (oldRef != null && oldRef.compareTo(newTimestamp) >= 0) { // FIXME?
        if (oldRef != null && oldRef.compareTo(newTimestamp) > 0) { // FIXME?
            LOGGER.warning("Wrong op order: received older than known from " + from.toString());
            return false;
        }
@@ -268,35 +186,20 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        return true;
    }

    /**
     * Update the causality threshold timestamp for a peer
     *
     * @param from      The peer ID
     * @param timestamp The timestamp received from it
     */
    public void updateExternalTimestamp(PeerIdT from, TimestampT timestamp) {
    public boolean updateExternalTimestamp(PeerIdT from, TimestampT timestamp) {
        // TODO: Ideally no point in this separate locking?
        var gotExt = _storage.getPeerTimestampLog().getForPeer(from);
        var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId());
        if (!(gotExt != null && gotExt.compareTo(timestamp) >= 0))
            updateTimestampImpl(from, timestamp);
        if (!(gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0))
            updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack?
        if ((gotExt != null && gotExt.compareTo(timestamp) >= 0)
                && (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false;
        updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack?
        updateTimestampImpl(from, timestamp);
        tryTrimLog();
        return true;
    }

    /**
     * Apply an operation from a peer
     *
     * @param from                 The peer ID
     * @param op                   The operation to apply
     * @param failCreatingIfExists Whether to fail if there is a name conflict,
     *                             otherwise replace the existing node
     * @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
     */
    private void applyOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
        if (!updateTimestampImpl(op.timestamp().nodeId(), op.timestamp().timestamp())) return;

        LOGGER.finer(() -> "Will apply op: " + op + " from " + from);
        if (!updateTimestampImpl(from, op.timestamp().timestamp())) return;

        var log = _storage.getLog();

@@ -309,61 +212,54 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        }
        assert cmp != 0;
        if (cmp < 0) {
            if (log.containsKey(op.timestamp())) return;
            var toUndo = log.newestSlice(op.timestamp(), false);
            for (var entry : toUndo.reversed()) {
                undoOp(entry.getValue());
            try {
                if (log.containsKey(op.timestamp())) return;
                var toUndo = log.newestSlice(op.timestamp(), false);
                _undoCtx = new HashMap<>();
                for (var entry : toUndo.reversed()) {
                    undoOp(entry.getValue());
                }
                try {
                    doAndPut(op, failCreatingIfExists);
                } finally {
                    for (var entry : toUndo) {
                        redoOp(entry);
                    }

                    if (!_undoCtx.isEmpty()) {
                        for (var e : _undoCtx.entrySet()) {
                            LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
                            _storage.removeNode(e.getKey());
                        }
                    }
                    _undoCtx = null;
                }
            } finally {
                tryTrimLog();
            }
            doAndPut(op, failCreatingIfExists);
            for (var entry : toUndo) {
                redoOp(entry);
            }
            tryTrimLog();
        } else {
            doAndPut(op, failCreatingIfExists);
            tryTrimLog();
        }
    }

    /**
     * Get a new timestamp, incrementing the one in storage
     *
     * @return A new timestamp
     */
    private CombinedTimestamp<TimestampT, PeerIdT> getTimestamp() {
        return new CombinedTimestamp<>(_clock.getTimestamp(), _peers.getSelfId());
    }

    /**
     * Create a new move operation
     *
     * @param newParent The new parent node ID
     * @param newMeta   The new metadata
     * @param node      The child node ID
     * @return A new move operation
     */
    private <LocalMetaT extends MetaT> OpMove<TimestampT, PeerIdT, LocalMetaT, NodeIdT> createMove(NodeIdT newParent, LocalMetaT newMeta, NodeIdT node) {
        return new OpMove<>(getTimestamp(), newParent, newMeta, node);
    }

    /**
     * Perform the operation and return the log record
     *
     * @param op                   The operation to perform
     * @param failCreatingIfExists Whether to fail if there is a name conflict,
     *                             otherwise replace the existing node
     * @return The log record
     * @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
     */
    private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> doOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
        LOGGER.finer(() -> "Doing op: " + op);
        LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computed;
        try {
            computed = computeEffects(op, failCreatingIfExists);
        } catch (AlreadyExistsException aex) {
            throw aex;
        } catch (Exception e) {
            throw new RuntimeException("Error computing effects for op " + op.toString(), e);
            LOGGER.log(Level.SEVERE, "Error computing effects for op" + op.toString(), e);
            computed = new LogRecord<>(op, null);
        }

        if (computed.effects() != null)
@@ -371,27 +267,30 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        return computed;
    }

    /**
     * Get a new node from storage
     *
     * @param key    The node ID
     * @param parent The parent node ID
     * @param meta   The metadata
     * @return A new tree node
     */
    private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
        if (_undoCtx != null) {
            var node = _undoCtx.get(key);
            if (node != null) {
                try {
                    if (!node.children().isEmpty()) {
                        LOGGER.log(Level.WARNING, "Not empty children for undone node " + key);
                    }
                    node = node.withParent(parent).withMeta(meta);
                } catch (Exception e) {
                    LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
                    node = null;
                }
            }
            if (node != null) {
                _undoCtx.remove(key);
                return node;
            }
        }
        return _storage.createNewNode(key, parent, meta);
    }

    /**
     * Apply the effects of a log record
     *
     * @param sourceOp The source operation
     * @param effects  The list of log effects
     */
    private void applyEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> sourceOp, List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {
        for (var effect : effects) {
            LOGGER.finer(() -> "Applying effect: " + effect + " from op " + sourceOp);
            TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> oldParentNode = null;
            TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> newParentNode;
            TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node;
@@ -405,7 +304,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
                node = _storage.getById(effect.childId());
            }
            if (oldParentNode != null) {
                var newOldParentChildren = oldParentNode.children().minus(effect.oldName());
                var newOldParentChildren = oldParentNode.children().minus(effect.oldInfo().oldMeta().getName());
                oldParentNode = oldParentNode.withChildren(newOldParentChildren);
                _storage.putNode(oldParentNode);
            }
@@ -414,12 +313,12 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
            newParentNode = _storage.getById(effect.newParentId());

            {
                var newNewParentChildren = newParentNode.children().plus(effect.newName(), effect.childId());
                var newNewParentChildren = newParentNode.children().plus(effect.newMeta().getName(), effect.childId());
                newParentNode = newParentNode.withChildren(newNewParentChildren);
                _storage.putNode(newParentNode);
            }
            if (effect.newParentId().equals(_storage.getTrashId()) &&
                    !Objects.equals(effect.newName(), effect.childId().toString()))
                    !Objects.equals(effect.newMeta().getName(), effect.childId().toString()))
                throw new IllegalArgumentException("Move to trash should have id of node as name");
            _storage.putNode(
                    node.withParent(effect.newParentId())
@@ -429,15 +328,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        }
    }

    /**
     * Compute the effects of a move operation
     *
     * @param op                   The operation to process
     * @param failCreatingIfExists Whether to fail if there is a name conflict,
     *                             otherwise replace the existing node
     * @return The log record with the computed effects
     * @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
     */
    private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computeEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
        var node = _storage.getById(op.childId());

@@ -445,46 +335,32 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        NodeIdT newParentId = op.newParentId();
        TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> newParent = _storage.getById(newParentId);


        if (newParent == null) {
            LOGGER.log(Level.SEVERE, "New parent not found " + op.newName() + " " + op.childId());

            // Creation
            if (oldParentId == null) {
                LOGGER.severe(() -> "Creating both dummy parent and child node");
                return new LogRecord<>(op, List.of(
                        new LogEffect<>(null, op, _storage.getLostFoundId(), null, newParentId),
                        new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId())
                ));
            } else {
                LOGGER.severe(() -> "Moving child node to dummy parent");
                return new LogRecord<>(op, List.of(
                        new LogEffect<>(null, op, _storage.getLostFoundId(), null, newParentId),
                        new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, node.meta()), op, op.newParentId(), op.newMeta(), op.childId())
                ));
            }
            LOGGER.log(Level.SEVERE, "New parent not found " + op.newMeta().getName() + " " + op.childId());
            return new LogRecord<>(op, null);
        }

        if (oldParentId == null) {
            var conflictNodeId = newParent.children().get(op.newName());
            var conflictNodeId = newParent.children().get(op.newMeta().getName());

            if (conflictNodeId != null) {
                if (failCreatingIfExists)
                    throw new AlreadyExistsException("Already exists: " + op.newName() + ": " + conflictNodeId);
                    throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId);

                var conflictNode = _storage.getById(conflictNodeId);
                MetaT conflictNodeMeta = conflictNode.meta();

                LOGGER.finer(() -> "Node creation conflict: " + conflictNode);
                if (Objects.equals(conflictNodeMeta, op.newMeta())) {
                    return new LogRecord<>(op, null);
                }

                String newConflictNodeName = op.newName() + ".conflict." + conflictNode.key();
                String newOursName = op.newName() + ".conflict." + op.childId();
                String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.key();
                String newOursName = op.newMeta().getName() + ".conflict." + op.childId();
                return new LogRecord<>(op, List.of(
                        new LogEffect<>(new LogEffectOld<>(conflictNode.lastEffectiveOp(), newParentId, conflictNodeMeta), conflictNode.lastEffectiveOp(), newParentId, (MetaT) conflictNodeMeta.withName(newConflictNodeName), conflictNodeId),
                        new LogEffect<>(null, op, op.newParentId(), (MetaT) op.newMeta().withName(newOursName), op.childId())
                ));
            } else {
                LOGGER.finer(() -> "Simple node creation");
                return new LogRecord<>(op, List.of(
                        new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId())
                ));
@@ -496,38 +372,29 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        }

        MetaT oldMeta = node.meta();
        if (oldMeta != null
                && op.newMeta() != null
                && !oldMeta.getClass().equals(op.newMeta().getClass())) {
            throw new RuntimeException("Class mismatch for meta for node " + node.key());
        if (!oldMeta.getClass().equals(op.newMeta().getClass())) {
            LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
            return new LogRecord<>(op, null);
        }

        var replaceNodeId = newParent.children().get(op.newName());
        var replaceNodeId = newParent.children().get(op.newMeta().getName());
        if (replaceNodeId != null) {
            var replaceNode = _storage.getById(replaceNodeId);
            var replaceNodeMeta = replaceNode.meta();

            LOGGER.finer(() -> "Node replacement: " + replaceNode);
            if (Objects.equals(replaceNodeMeta, op.newMeta())) {
                return new LogRecord<>(op, null);
            }

            return new LogRecord<>(op, List.of(
                    new LogEffect<>(new LogEffectOld<>(replaceNode.lastEffectiveOp(), newParentId, replaceNodeMeta), replaceNode.lastEffectiveOp(), _storage.getTrashId(), (MetaT) replaceNodeMeta.withName(replaceNodeId.toString()), replaceNodeId),
                    new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
            ));
        }

        LOGGER.finer(() -> "Simple node move");
        return new LogRecord<>(op, List.of(
                new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
        ));
    }

    /**
     * Check if a node is an ancestor of another node
     *
     * @param child  The child node ID
     * @param parent The parent node ID
     * @return True if the child is an ancestor of the parent, false otherwise
     */
    private boolean isAncestor(NodeIdT child, NodeIdT parent) {
        var node = _storage.getById(parent);
        NodeIdT curParent;
@@ -538,11 +405,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        return false;
    }

    /**
     * Walk the tree and apply the given consumer to each node
     *
     * @param consumer The consumer to apply to each node
     */
    public void walkTree(Consumer<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> consumer) {
        ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
        queue.push(_storage.getRootId());
@@ -556,12 +418,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        }
    }

    /**
     * Find the parent of a node that matches the given predicate
     *
     * @param kidPredicate The predicate to match the child node
     * @return A pair containing the name of the child and the ID of the parent, or null if not found
     */
    public Pair<String, NodeIdT> findParent(Function<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>, Boolean> kidPredicate) {
        ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
        queue.push(_storage.getRootId());
@@ -582,31 +438,24 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
        return null;
    }

    /**
     * Record the bootstrap operations for a given peer
     * Will visit all nodes of the tree and add their effective operations to both the queue to be sent to the peer,
     * and to the global operation log.
     *
     * @param host The peer ID
     */
    public void recordBoostrapFor(PeerIdT host) {
        TreeMap<CombinedTimestamp<TimestampT, PeerIdT>, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT>> result = new TreeMap<>();

        walkTree(node -> {
            var op = node.lastEffectiveOp();
            if (node.lastEffectiveOp() == null) return;
            LOGGER.info("visited bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newName() + " " + op.childId() + "->" + op.newParentId());
            LOGGER.info("visited bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
            result.put(node.lastEffectiveOp().timestamp(), node.lastEffectiveOp());
        });

        for (var le : _storage.getLog().getAll()) {
            var op = le.getValue().op();
            LOGGER.info("bootstrap op from log for " + host + ": " + op.timestamp().toString() + " " + op.newName() + " " + op.childId() + "->" + op.newParentId());
            LOGGER.info("bootstrap op from log for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
            result.put(le.getKey(), le.getValue().op());
        }

        for (var op : result.values()) {
            LOGGER.info("Recording bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newName() + " " + op.childId() + "->" + op.newParentId());
            LOGGER.info("Recording bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
            _opRecorder.recordOpForPeer(host, op);
        }
    }

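To make the API above concrete, a minimal local-usage sketch (assuming suitable StorageInterface/PeerInterface/Clock/OpRecorder implementations, e.g. the Test* classes from the test sources further below; the variable names are hypothetical):

// Create a directory under the root, then resolve it again by path.
var tree = new KleppmannTree<Long, Long, TestNodeMeta, Long>(storage, peers, clock, recorder);
Long dirId = storage.getNewNodeId();
tree.move(storage.getRootId(), new TestNodeMetaDir("docs"), dirId); // applies locally and records the op for peers
Long found = tree.traverse(List.of("docs"));                        // == dirId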
@@ -2,35 +2,10 @@ package com.usatiuk.kleppmanntree;

import java.io.Serializable;

/**
 * LogEffect is a record that represents the effect of a log entry on a tree node.
 * @param oldInfo the old information about the node, before it was moved. Null if the node did not exist before
 * @param effectiveOp the operation that had caused this effect to be applied
 * @param newParentId the ID of the new parent node
 * @param newMeta the new metadata of the node
 * @param childId the ID of the child node
 * @param <TimestampT> the type of the timestamp
 * @param <PeerIdT> the type of the peer ID
 * @param <MetaT> the type of the node metadata
 * @param <NodeIdT> the type of the node ID
 */
public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>(
        LogEffectOld<TimestampT, PeerIdT, MetaT, NodeIdT> oldInfo,
        OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> effectiveOp,
        NodeIdT newParentId,
        MetaT newMeta,
        NodeIdT childId) implements Serializable {
    public String oldName() {
        if (oldInfo.oldMeta() != null) {
            return oldInfo.oldMeta().name();
        }
        return childId.toString();
    }

    public String newName() {
        if (newMeta != null) {
            return newMeta.name();
        }
        return childId.toString();
    }
}

@@ -2,16 +2,6 @@ package com.usatiuk.kleppmanntree;

import java.io.Serializable;

/**
 * Represents the old information about a node before it was moved.
 * @param oldEffectiveMove the old effective move that had caused this effect to be applied
 * @param oldParent the ID of the old parent node
 * @param oldMeta the old metadata of the node
 * @param <TimestampT> the type of the timestamp
 * @param <PeerIdT> the type of the peer ID
 * @param <MetaT> the type of the node metadata
 * @param <NodeIdT> the type of the node ID
 */
public record LogEffectOld<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
        (OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> oldEffectiveMove,
         NodeIdT oldParent,

@@ -4,82 +4,29 @@ import org.apache.commons.lang3.tuple.Pair;

import java.util.List;

/**
 * LogInterface is an interface that allows accessing the log of operations
 * @param <TimestampT> the type of the timestamp
 * @param <PeerIdT> the type of the peer ID
 * @param <MetaT> the type of the node metadata
 * @param <NodeIdT> the type of the node ID
 */
public interface LogInterface<
        TimestampT extends Comparable<TimestampT>,
        PeerIdT extends Comparable<PeerIdT>,
        MetaT extends NodeMeta,
        NodeIdT> {
    /**
     * Peek the oldest log entry.
     * @return the oldest log entry
     */
    Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekOldest();

    /**
     * Take the oldest log entry.
     * @return the oldest log entry
     */
    Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> takeOldest();

    /**
     * Peek the newest log entry.
     * @return the newest log entry
     */
    Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekNewest();

    /**
     * Return all log entries that are newer than the given timestamp.
     * @param since the timestamp to compare with
     * @param inclusive if true, include the log entry with the given timestamp
     * @return a list of log entries that are newer than the given timestamp
     */
    List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>>
    newestSlice(CombinedTimestamp<TimestampT, PeerIdT> since, boolean inclusive);

    /**
     * Return all the log entries
     * @return a list of all log entries
     */
    List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>> getAll();

    /**
     * Checks if the log is empty.
     * @return true if the log is empty, false otherwise
     */
    boolean isEmpty();

    /**
     * Checks if the log contains the given timestamp.
     * @param timestamp the timestamp to check
     * @return true if the log contains the given timestamp, false otherwise
     */
    boolean containsKey(CombinedTimestamp<TimestampT, PeerIdT> timestamp);

    /**
     * Get the size of the log.
     * @return the size of the log (number of entries)
     */
    long size();

    /**
     * Add a log entry to the log.
     * @param timestamp the timestamp of the log entry
     * @param record the log entry
     * @throws IllegalStateException if the log entry already exists
     */
    void put(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record);

    /**
     * Replace a log entry in the log.
     * @param timestamp the timestamp of the log entry
     * @param record the log entry
     */
    void replace(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record);
}

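A NavigableMap makes these operations natural to implement; an illustrative sketch (not the project's implementation) of newestSlice over a TreeMap-backed log, using the Long-keyed test types from further below:

// Entries at or after `since` in timestamp order; `inclusive` controls the boundary.
NavigableMap<CombinedTimestamp<Long, Long>, LogRecord<Long, Long, TestNodeMeta, Long>> _map = new TreeMap<>();

List<Pair<CombinedTimestamp<Long, Long>, LogRecord<Long, Long, TestNodeMeta, Long>>>
newestSlice(CombinedTimestamp<Long, Long> since, boolean inclusive) {
    return _map.tailMap(since, inclusive).entrySet().stream()
            .map(e -> Pair.of(e.getKey(), e.getValue()))
            .toList();
}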
@@ -3,15 +3,6 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
import java.util.List;

/**
 * Represents a log record in the Kleppmann tree.
 * @param op the operation that is stored in this log record
 * @param effects the effects of the operation (resulting moves)
 * @param <TimestampT> the type of the timestamp
 * @param <PeerIdT> the type of the peer ID
 * @param <MetaT> the type of the node metadata
 * @param <NodeIdT> the type of the node ID
 */
public record LogRecord<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
        (OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op,
         List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) implements Serializable {

@@ -2,24 +2,8 @@ package com.usatiuk.kleppmanntree;

import java.io.Serializable;

/**
 * Represents metadata associated with a node in the Kleppmann tree.
 * This interface is used to define the metadata that can be associated with nodes in the tree.
 * Implementations of this interface should provide a name for the node and a method to create a copy of it with a new name.
 */
public interface NodeMeta extends Serializable {
    /**
     * Returns the name of the node.
     *
     * @return the name of the node
     */
    String name();
    String getName();

    /**
     * Creates a copy of the metadata with a new name.
     *
     * @param name the new name for the metadata
     * @return a new instance of NodeMeta with the specified name
     */
    NodeMeta withName(String name);
}

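A minimal sketch of an implementation (hypothetical type, using a record; note the explicit getName(), since this branch renames the accessor away from name()):

public record FileMeta(String name, long size) implements NodeMeta {
    @Override
    public String getName() {
        return name;
    }

    @Override
    public NodeMeta withName(String name) {
        return new FileMeta(name, size); // copy with the new name, other fields preserved
    }
}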
@@ -2,30 +2,7 @@ package com.usatiuk.kleppmanntree;

import java.io.Serializable;

/**
 * Operation that moves a child node to a new parent node.
 *
 * @param timestamp the timestamp of the operation
 * @param newParentId the ID of the new parent node
 * @param newMeta the new metadata of the node, can be null
 * @param childId the ID of the child node (the node that is being moved)
 * @param <TimestampT> the type of the timestamp
 * @param <PeerIdT> the type of the peer ID
 * @param <MetaT> the type of the node metadata
 * @param <NodeIdT> the type of the node ID
 */
public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
        (CombinedTimestamp<TimestampT, PeerIdT> timestamp, NodeIdT newParentId, MetaT newMeta,
         NodeIdT childId) implements Serializable {
    /**
     * Returns the new name of the node: name extracted from the new metadata if available,
     * otherwise the child ID converted to string.
     *
     * @return the new name of the node
     */
    public String newName() {
        if (newMeta != null)
            return newMeta.name();
        return childId.toString();
    }
}

@@ -1,26 +1,7 @@
package com.usatiuk.kleppmanntree;

/**
 * Interface to provide recording operations to be sent to peers asynchronously.
 * @param <TimestampT> the type of the timestamp
 * @param <PeerIdT> the type of the peer ID
 * @param <MetaT> the type of the node metadata
 * @param <NodeIdT> the type of the node ID
 */
public interface OpRecorder<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
    /**
     * Records an operation to be sent to peers asynchronously.
     * The operation will be sent to all known peers in the system.
     *
     * @param op the operation to be recorded
     */
    void recordOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op);

    /**
     * Records an operation to be sent to a specific peer asynchronously.
     *
     * @param peer the ID of the peer to send the operation to
     * @param op the operation to be recorded
     */
    void recordOpForPeer(PeerIdT peer, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op);
}

@@ -2,22 +2,8 @@ package com.usatiuk.kleppmanntree;

import java.util.Collection;

/**
 * Interface providing access to a list of known peers.
 * @param <PeerIdT> the type of the peer ID
 */
public interface PeerInterface<PeerIdT extends Comparable<PeerIdT>> {
    /**
     * Returns the ID of the current peer.
     *
     * @return the ID of the current peer
     */
    PeerIdT getSelfId();

    /**
     * Returns a collection of all known peers.
     *
     * @return a collection of all known peers
     */
    Collection<PeerIdT> getAllPeers();
}

@@ -1,26 +1,11 @@
package com.usatiuk.kleppmanntree;

/**
 * Interface providing a map of newest received timestamps for each peer. (causality thresholds)
 * If a peer has some timestamp recorded in this map,
 * it means that all messages coming from this peer will have a newer timestamp.
 * @param <TimestampT>
 * @param <PeerIdT>
 */
public interface PeerTimestampLogInterface<
        TimestampT extends Comparable<TimestampT>,
        PeerIdT extends Comparable<PeerIdT>> {

    /**
     * Get the timestamp for a specific peer.
     * @param peerId the ID of the peer
     * @return the timestamp for the peer
     */
    TimestampT getForPeer(PeerIdT peerId);

    /**
     * Store the timestamp for a specific peer.
     * @param peerId the ID of the peer
     * @param timestamp the timestamp to store
     */
    void putForPeer(PeerIdT peerId, TimestampT timestamp);

}
@@ -1,89 +1,26 @@
package com.usatiuk.kleppmanntree;

/**
 * Storage interface for the Kleppmann tree.
 *
 * @param <TimestampT> the type of the timestamp
 * @param <PeerIdT> the type of the peer ID
 * @param <MetaT> the type of the node metadata
 * @param <NodeIdT> the type of the node ID
 */
public interface StorageInterface<
        TimestampT extends Comparable<TimestampT>,
        PeerIdT extends Comparable<PeerIdT>,
        MetaT extends NodeMeta,
        NodeIdT> {
    /**
     * Get the root node ID.
     *
     * @return the root node ID
     */
    NodeIdT getRootId();

    /**
     * Get the trash node ID.
     *
     * @return the trash node ID
     */
    NodeIdT getTrashId();

    /**
     * Get the lost and found node ID.
     *
     * @return the lost and found node ID
     */
    NodeIdT getLostFoundId();

    /**
     * Get the new node ID.
     *
     * @return the new node ID
     */
    NodeIdT getNewNodeId();

    /**
     * Get the node by its ID.
     *
     * @param id the ID of the node
     * @return the node with the specified ID, or null if not found
     */
    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getById(NodeIdT id);

    /**
     * Create a new node with the specified key, parent, and metadata.
     *
     * @param key the ID of the new node
     * @param parent the ID of the parent node
     * @param meta the metadata of the new node
     * @return the new node
     */
    // Creates a node, returned wrapper is RW-locked
    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> createNewNode(NodeIdT key, NodeIdT parent, MetaT meta);

    /**
     * Put a node into the storage.
     *
     * @param node the node to put into the storage
     */
    void putNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node);

    /**
     * Remove a node from the storage.
     *
     * @param id the ID of the node to remove
     */
    void removeNode(NodeIdT id);

    /**
     * Get the log interface.
     *
     * @return the log interface
     */
    LogInterface<TimestampT, PeerIdT, MetaT, NodeIdT> getLog();

    /**
     * Get the peer timestamp log interface.
     *
     * @return the peer timestamp log interface
     */
    PeerTimestampLogInterface<TimestampT, PeerIdT> getPeerTimestampLog();
}

@@ -1,96 +1,26 @@
package com.usatiuk.kleppmanntree;

import jakarta.annotation.Nullable;
import org.pcollections.PMap;

import java.io.Serializable;
import java.util.Map;

/**
 * Represents a node in the Kleppmann tree.
 *
 * @param <TimestampT> the type of the timestamp
 * @param <PeerIdT> the type of the peer ID
 * @param <MetaT> the type of the node metadata
 * @param <NodeIdT> the type of the node ID
 */
public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> extends Serializable {
    /**
     * Get the ID of the node.
     *
     * @return the ID of the node
     */
    NodeIdT key();

    /**
     * Get the ID of the parent node.
     *
     * @return the ID of the parent node
     */
    NodeIdT parent();

    /**
     * Get the last effective operation that moved this node.
     *
     * @return the last effective operation
     */
    OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp();

    /**
     * Get the metadata stored in this node.
     *
     * @return the metadata of the node
     */
    @Nullable
    MetaT meta();

    /**
     * Get the name of the node.
     * If the node has metadata, the name is extracted from it, otherwise the key is converted to string.
     *
     * @return the name of the node
     */
    default String name() {
        var meta = meta();
        if (meta != null) return meta.name();
        return key().toString();
    }

    /**
     * Get the children of this node.
     *
     * @return a map of child names to their respective node IDs
     */
    PMap<String, NodeIdT> children();

    /**
     * Make a copy of this node with a new parent.
     *
     * @param parent the ID of the new parent node
     * @return a new TreeNode instance with the updated parent
     */
    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withParent(NodeIdT parent);

    /**
     * Make a copy of this node with a new last effective operation.
     *
     * @param lastEffectiveOp the new last effective operation
     * @return a new TreeNode instance with the updated last effective operation
     */
    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withLastEffectiveOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp);

    /**
     * Make a copy of this node with new metadata.
     *
     * @param meta the new metadata
     * @return a new TreeNode instance with the updated metadata
     */
    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withMeta(MetaT meta);

    /**
     * Make a copy of this node with new children.
     *
     * @param children the new children
     * @return a new TreeNode instance with the updated children
     */
    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withChildren(PMap<String, NodeIdT> children);
}

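TreeNode is immutable, so a short sketch of the intended update pattern (field and variable names follow KleppmannTree above; only illustrative):

// Mutations produce copies via the with* methods and are persisted explicitly.
var node = _storage.getById(childId);
node = node.withParent(newParentId)
        .withMeta(newMeta)
        .withLastEffectiveOp(op);
_storage.putNode(node); // the storage now holds the updated copy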
@@ -2,15 +2,13 @@ package com.usatiuk.kleppmanntree;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

import java.util.List;

public class KleppmanTreeSimpleTest {
    private final TestNode testNode1 = new TestNode(1);
    private final TestNode testNode2 = new TestNode(2);
    private final TestNode testNode3 = new TestNode(3);


    @Test
    void circularTest() {
@@ -91,75 +89,4 @@ public class KleppmanTreeSimpleTest {
        Assertions.assertTrue(testNode2._storageInterface.getLog().size() <= 1);
    }

    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    void undoWithRenameTest(boolean opOrder) {
        var d1id = testNode1._storageInterface.getNewNodeId();
        var d2id = testNode2._storageInterface.getNewNodeId();
        var d3id = testNode3._storageInterface.getNewNodeId();
        testNode1._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d1id);
        testNode2._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d2id);
        testNode3._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d3id);
        var r1 = testNode1.getRecorded();
        var r2 = testNode2.getRecorded();
        var r3 = testNode3.getRecorded();
        Assertions.assertEquals(1, r1.size());
        Assertions.assertEquals(1, r2.size());
        Assertions.assertEquals(1, r3.size());

        if (opOrder) {
            testNode2._tree.applyExternalOp(3L, r3.getFirst());
            testNode2._tree.applyExternalOp(1L, r1.getFirst());
        } else {
            testNode2._tree.applyExternalOp(1L, r1.getFirst());
            testNode2._tree.applyExternalOp(3L, r3.getFirst());
        }

        Assertions.assertIterableEquals(List.of("Test1", "Test1.conflict." + d1id, "Test1.conflict." + d2id), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());

        if (opOrder) {
            testNode1._tree.applyExternalOp(3L, r3.getFirst());
            testNode1._tree.applyExternalOp(2L, r2.getFirst());
        } else {
            testNode1._tree.applyExternalOp(2L, r2.getFirst());
            testNode1._tree.applyExternalOp(3L, r3.getFirst());
        }

        Assertions.assertIterableEquals(List.of("Test1", "Test1.conflict." + d1id, "Test1.conflict." + d2id), testNode1._storageInterface.getById(testNode1._storageInterface.getRootId()).children().keySet());

        if (opOrder) {
            testNode3._tree.applyExternalOp(2L, r2.getFirst());
            testNode3._tree.applyExternalOp(1L, r1.getFirst());
        } else {
            testNode3._tree.applyExternalOp(1L, r1.getFirst());
            testNode3._tree.applyExternalOp(2L, r2.getFirst());
        }

        Assertions.assertIterableEquals(List.of("Test1", "Test1.conflict." + d1id, "Test1.conflict." + d2id), testNode3._storageInterface.getById(testNode3._storageInterface.getRootId()).children().keySet());
    }

    @Test
    void noFailedOpRecordTest() {
        var d1id = testNode1._storageInterface.getNewNodeId();
        var d2id = testNode1._storageInterface.getNewNodeId();
        testNode1._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d1id);
        Assertions.assertThrows(AlreadyExistsException.class, () -> testNode1._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d2id));
        var r1 = testNode1.getRecorded();
        Assertions.assertEquals(1, r1.size());
    }

    @Test
    void externalOpWithDummy() {
        Long d1id = testNode1._storageInterface.getNewNodeId();
        Long f1id = testNode1._storageInterface.getNewNodeId();

        testNode1._tree.applyExternalOp(2L, new OpMove<>(
                new CombinedTimestamp<>(2L, 2L), d1id, new TestNodeMetaFile("Hi", 123), f1id
        ));
        testNode1._tree.applyExternalOp(2L, new OpMove<>(
                new CombinedTimestamp<>(3L, 2L), testNode1._storageInterface.getRootId(), new TestNodeMetaDir("HiDir"), d1id
        ));

        Assertions.assertEquals(f1id, testNode1._tree.traverse(List.of("HiDir", "Hi")));
    }
}

@@ -8,7 +8,7 @@ public abstract class TestNodeMeta implements NodeMeta {
    }

    @Override
    public String name() {
    public String getName() {
        return _name;
    }


@@ -14,7 +14,6 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
        _peerId = peerId;
        _nodes.put(getRootId(), new TestTreeNode(getRootId(), null, null));
        _nodes.put(getTrashId(), new TestTreeNode(getTrashId(), null, null));
        _nodes.put(getLostFoundId(), new TestTreeNode(getLostFoundId(), null, null));
    }

    @Override
@@ -27,11 +26,6 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
        return -1L;
    }

    @Override
    public Long getLostFoundId() {
        return -2L;
    }

    @Override
    public Long getNewNodeId() {
        return _curId++ | _peerId << 32;

@@ -18,11 +18,6 @@
    </properties>

    <dependencies>
        <dependency>
            <groupId>net.jqwik</groupId>
            <artifactId>jqwik</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5</artifactId>
@@ -36,6 +31,10 @@
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
        <dependency>
            <groupId>net.openhft</groupId>
            <artifactId>zero-allocation-hashing</artifactId>
        </dependency>
        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter-engine</artifactId>
@@ -55,6 +54,11 @@
            <artifactId>utils</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>supportlib</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5-mockito</artifactId>
@@ -84,11 +88,6 @@
                    <forkCount>1C</forkCount>
                    <reuseForks>false</reuseForks>
                    <parallel>classes</parallel>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            false
                        </junit.jupiter.execution.parallel.enabled>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
@@ -100,6 +99,7 @@
                <execution>
                    <id>quarkus-plugin</id>
                    <goals>
                        <goal>build</goal>
                        <goal>generate-code</goal>
                        <goal>generate-code-tests</goal>
                    </goals>

@@ -0,0 +1,28 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Iterator;

public interface CloseableKvIterator<K extends Comparable<K>, V> extends Iterator<Pair<K, V>>, AutoCloseableNoThrow {
    K peekNextKey();

    Class<?> peekNextType();

    void skip();

    K peekPrevKey();

    Class<?> peekPrevType();

    Pair<K, V> prev();

    boolean hasPrev();

    void skipPrev();

    default CloseableKvIterator<K, V> reversed() {
        return new ReversedKvIterator<>(this);
    }
}
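A usage sketch of the bidirectional, type-peeking iterator that this branch introduces (openIterator() and the Tombstone class are hypothetical here, and AutoCloseableNoThrow is assumed to extend AutoCloseable; the point is that peekNextType() lets a caller filter entries without materializing values):

try (CloseableKvIterator<JObjectKey, JData> it = openIterator()) {
    while (it.hasNext()) {
        if (it.peekNextType() == Tombstone.class) { // hypothetical tombstone marker class
            it.skip();                              // skip without deserializing the value
            continue;
        }
        var kv = it.next();
        System.out.println(kv.getKey() + " -> " + kv.getValue());
    }
    if (it.hasPrev()) {
        var last = it.prev(); // step back over the last returned pair
    }
}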
@@ -1,15 +1,18 @@
package com.usatiuk.objects.transaction;
package com.usatiuk.dhfs.objects;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.transaction.LockingStrategy;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import org.apache.commons.lang3.tuple.Pair;

import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Iterator;
import java.util.Optional;

@Singleton
@ApplicationScoped
public class CurrentTransaction implements Transaction {
    @Inject
    TransactionManager transactionManager;
@@ -25,8 +28,8 @@ public class CurrentTransaction implements Transaction {
    }

    @Override
    public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key) {
        return transactionManager.current().get(type, key);
    public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy) {
        return transactionManager.current().get(type, key, strategy);
    }

    @Override
@@ -34,6 +37,12 @@ public class CurrentTransaction implements Transaction {
        transactionManager.current().delete(key);
    }

    @Nonnull
    @Override
    public Collection<JObjectKey> findAllObjects() {
        return transactionManager.current().findAllObjects();
    }

    @Override
    public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
        return transactionManager.current().getIterator(start, key);
@@ -43,9 +52,4 @@ public class CurrentTransaction implements Transaction {
    public <T extends JData> void put(JData obj) {
        transactionManager.current().put(obj);
    }

    @Override
    public <T extends JData> void putNew(JData obj) {
        transactionManager.current().putNew(obj);
    }
}
@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects;

import java.util.Optional;

public record Data<V>(V value) implements MaybeTombstone<V> {
    @Override
    public Optional<V> opt() {
        return Optional.of(value);
    }
}
@@ -0,0 +1,8 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;

@FunctionalInterface
public interface IterProdFn<K extends Comparable<K>, V> {
    CloseableKvIterator<K, V> get(IteratorStart start, K key);
}
@@ -0,0 +1,16 @@
package com.usatiuk.dhfs.objects;

import java.io.Serializable;

// TODO: This could be maybe moved to a separate module?
// The base class for JObject data
// Only one instance of this "exists" per key, the instance in the manager is canonical
// When committing a transaction, the instance is checked against it, if it isn't the same, a race occurred.
// It is immutable, its version is filled in by the allocator from the AllocVersionProvider
public interface JData extends Serializable {
    JObjectKey key();

    default int estimateSize() {
        return 100;
    }
}
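A minimal sketch of a JData implementation (a hypothetical type, only for illustration; a record's key component satisfies the key() accessor):

public record PersonData(JObjectKey key, String name) implements JData {
    @Override
    public int estimateSize() {
        return 64 + name.length() * 2; // rough heap estimate instead of the default 100
    }
}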
@@ -0,0 +1,9 @@
package com.usatiuk.dhfs.objects;

public interface JDataVersionedWrapper {
    JData data();

    long version();

    int estimateSize();
}
@@ -1,12 +1,9 @@
package com.usatiuk.objects;
package com.usatiuk.dhfs.objects;

import jakarta.annotation.Nonnull;

import java.io.Serializable;

/**
 * Simple wrapper for an already-existing JData object with a version.
 */
public record JDataVersionedWrapperImpl(@Nonnull JData data,
                                        long version) implements Serializable, JDataVersionedWrapper {
    @Override
@@ -0,0 +1,44 @@
package com.usatiuk.dhfs.objects;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.utils.SerializationHelper;

public class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
    private final long _version;
    private ByteString _rawData;
    private JData _data;

    public JDataVersionedWrapperLazy(long version, ByteString rawData) {
        _version = version;
        _rawData = rawData;
    }

    public JData data() {
        if (_data != null)
            return _data;

        synchronized (this) {
            if (_data != null)
                return _data;

            try (var is = _rawData.newInput()) {
                _data = SerializationHelper.deserialize(is);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            _rawData = null;
            return _data;
        }
    }

    public long version() {
        return _version;
    }

    @Override
    public int estimateSize() {
        if (_data != null)
            return _data.estimateSize();
        return _rawData.size();
    }
}
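A usage sketch of the lazy wrapper (serializedBytes is a hypothetical ByteString holding a serialized JData):

JDataVersionedWrapper w = new JDataVersionedWrapperLazy(42L, serializedBytes);
long v = w.version();      // cheap: never touches the payload
int sz = w.estimateSize(); // raw byte size while still undeserialized
JData obj = w.data();      // deserializes once under double-checked locking, caches, drops the bytes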
@@ -0,0 +1,44 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;

import java.io.Serializable;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public record JObjectKey(String name) implements Serializable, Comparable<JObjectKey> {
    public static JObjectKey of(String name) {
        return new JObjectKey(name);
    }

    @Override
    public int compareTo(JObjectKey o) {
        return name.compareTo(o.name);
    }

    @Override
    public String toString() {
        return name;
    }

    public byte[] bytes() {
        return name.getBytes(StandardCharsets.UTF_8);
    }

    public ByteBuffer toByteBuffer() {
        var heapBb = StandardCharsets.UTF_8.encode(name);
        if (heapBb.isDirect()) return heapBb;
        var directBb = UninitializedByteBuffer.allocateUninitialized(heapBb.remaining());
        directBb.put(heapBb);
        directBb.flip();
        return directBb;
    }

    public static JObjectKey fromBytes(byte[] bytes) {
        return new JObjectKey(new String(bytes, StandardCharsets.UTF_8));
    }

    public static JObjectKey fromByteBuffer(ByteBuffer buff) {
        return new JObjectKey(StandardCharsets.UTF_8.decode(buff).toString());
    }
}
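Keys round-trip through UTF-8 in both array and buffer form, and toByteBuffer() yields a direct buffer (copying only when the charset encoder returned a heap buffer). A quick sanity sketch:

var key = JObjectKey.of("objects/123");
assert JObjectKey.fromBytes(key.bytes()).equals(key);
assert JObjectKey.fromByteBuffer(key.toByteBuffer()).equals(key);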
@@ -0,0 +1,239 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.snapshot.SnapshotManager;
import com.usatiuk.dhfs.objects.transaction.*;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.enterprise.inject.Instance;
import jakarta.inject.Inject;

import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;

// Manages all access to com.usatiuk.dhfs.objects.JData objects.
// In particular, it serves as a source of truth for what is committed to the backing storage.
// All data goes through it; it is responsible for transaction atomicity.
// TODO: persistent tx id
@ApplicationScoped
public class JObjectManager {
    private final List<PreCommitTxHook> _preCommitTxHooks;
    private boolean _ready = false;
    @Inject
    SnapshotManager snapshotManager;
    @Inject
    TransactionFactory transactionFactory;
    @Inject
    LockManager lockManager;

    private void verifyReady() {
        if (!_ready) throw new IllegalStateException("Wrong service order!");
    }

    void init(@Observes @Priority(200) StartupEvent event) {
        _ready = true;
    }

    JObjectManager(Instance<PreCommitTxHook> preCommitTxHooks) {
        _preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList();
    }

    public TransactionPrivate createTransaction() {
        verifyReady();
        var tx = transactionFactory.createTransaction();
        Log.tracev("Created transaction with snapshotId={0}", tx.snapshot().id());
        return tx;
    }

    public TransactionHandle commit(TransactionPrivate tx) {
        verifyReady();
        var writes = new LinkedHashMap<JObjectKey, TxRecord.TxObjectRecord<?>>();
        var dependenciesLocked = new LinkedHashMap<JObjectKey, Optional<JDataVersionedWrapper>>();
        Map<JObjectKey, TransactionObject<?>> readSet;
        var toUnlock = new ArrayList<AutoCloseableNoThrow>();

        Consumer<JObjectKey> addDependency =
                key -> {
                    dependenciesLocked.computeIfAbsent(key, k -> {
                        var lock = lockManager.lockObject(k);
                        toUnlock.add(lock);
                        return snapshotManager.readObjectDirect(k);
                    });
                };

        // For existing objects:
        // Check that their version is not higher than the version of the transaction being committed
        // TODO: check deletions, inserts
        try {
            try {
                Function<JObjectKey, JData> getPrev =
                        key -> switch (writes.get(key)) {
                            case TxRecord.TxObjectRecordWrite<?> write -> write.data();
                            case TxRecord.TxObjectRecordDeleted deleted -> null;
                            case null -> tx.getFromSource(JData.class, key).orElse(null);
                            default -> {
                                throw new TxCommitException("Unexpected value: " + writes.get(key));
                            }
                        };

                boolean somethingChanged;
                do {
                    somethingChanged = false;
                    Map<JObjectKey, TxRecord.TxObjectRecord<?>> currentIteration = new HashMap<>();
                    for (var hook : _preCommitTxHooks) {
                        for (var n : tx.drainNewWrites())
                            currentIteration.put(n.key(), n);
                        Log.trace("Commit iteration with " + currentIteration.size() + " records for hook " + hook.getClass());

                        for (var entry : currentIteration.entrySet()) {
                            somethingChanged = true;
                            Log.trace("Running pre-commit hook " + hook.getClass() + " for " + entry.getKey());
                            var oldObj = getPrev.apply(entry.getKey());
                            switch (entry.getValue()) {
                                case TxRecord.TxObjectRecordWrite<?> write -> {
                                    if (oldObj == null) {
                                        hook.onCreate(write.key(), write.data());
                                    } else {
                                        hook.onChange(write.key(), oldObj, write.data());
                                    }
                                }
                                case TxRecord.TxObjectRecordDeleted deleted -> {
                                    hook.onDelete(deleted.key(), oldObj);
                                }
                                default -> throw new TxCommitException("Unexpected value: " + entry);
                            }
                        }
                    }
                    writes.putAll(currentIteration);
                } while (somethingChanged);

                if (writes.isEmpty()) {
                    Log.trace("Committing transaction - no changes");
                    return new TransactionHandle() {
                        @Override
                        public void onFlush(Runnable runnable) {
                            runnable.run();
                        }
                    };
                }

            } finally {
                readSet = tx.reads();

                Stream.concat(readSet.keySet().stream(), writes.keySet().stream())
                        .sorted(Comparator.comparing(JObjectKey::toString))
                        .forEach(addDependency);

                for (var read : readSet.entrySet()) {
                    if (read.getValue() instanceof TransactionObjectLocked<?> locked) {
                        toUnlock.add(locked.lock());
                    }
                }
            }

            Log.trace("Committing transaction start");
            var snapshotId = tx.snapshot().id();

            for (var read : readSet.entrySet()) {
                var dep = dependenciesLocked.get(read.getKey());

                if (dep.isEmpty() != read.getValue().data().isEmpty()) {
                    Log.trace("Checking read dependency " + read.getKey() + " - not found");
                    throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
                }

                if (dep.isEmpty()) {
                    // TODO: Every write gets a dependency due to hooks
                    continue;
//                    assert false;
//                    throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
                }

                if (dep.get().version() > snapshotId) {
                    Log.trace("Checking dependency " + read.getKey() + " - newer than snapshot");
                    throw new TxCommitException("Serialization hazard: " + dep.get().data().key() + " " + dep.get().version() + " vs " + snapshotId);
                }

                Log.trace("Checking dependency " + read.getKey() + " - ok with read");
            }

            var addFlushCallback = snapshotManager.commitTx(
                    writes.values().stream()
                            .filter(r -> {
                                if (r instanceof TxRecord.TxObjectRecordWrite<?>(JData data)) {
                                    var dep = dependenciesLocked.get(data.key());
                                    if (dep.isPresent() && dep.get().version() > snapshotId) {
                                        Log.trace("Skipping write " + data.key() + " - dependency " + dep.get().version() + " vs " + snapshotId);
                                        return false;
                                    }
                                }
                                return true;
                            }).toList());

            for (var callback : tx.getOnCommit()) {
                callback.run();
            }

            for (var callback : tx.getOnFlush()) {
                addFlushCallback.accept(callback);
            }

            return new TransactionHandle() {
                @Override
                public void onFlush(Runnable runnable) {
                    addFlushCallback.accept(runnable);
                }
            };
        } catch (Throwable t) {
            Log.trace("Error when committing transaction", t);
            throw new TxCommitException(t.getMessage(), t);
        } finally {
            for (var unlock : toUnlock) {
                unlock.close();
            }
            tx.close();
        }
    }

    public void rollback(TransactionPrivate tx) {
        verifyReady();
        tx.reads().forEach((key, value) -> {
            if (value instanceof TransactionObjectLocked<?> locked) {
                locked.lock().close();
            }
        });
        tx.close();
    }

//    private class TransactionObjectSourceImpl implements TransactionObjectSource {
//        private final long _txId;
//
//        private TransactionObjectSourceImpl(long txId) {
//            _txId = txId;
//        }
//
//        @Override
//        public <T extends JData> TransactionObject<T> get(Class<T> type, JObjectKey key) {
//            var got = getObj(type, key);
//            if (got.data().isPresent() && got.data().get().version() > _txId) {
//                throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
//            }
//            return got;
//        }
//
//        @Override
//        public <T extends JData> TransactionObject<T> getWriteLocked(Class<T> type, JObjectKey key) {
//            var got = getObjLock(type, key);
//            if (got.data().isPresent() && got.data().get().version() > _txId) {
//                got.lock().close();
//                throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
//            }
//            return got;
//        }
//    }
}
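The commit path: pre-commit hooks run to a fixed point over newly drained writes, then read and write dependencies are locked in sorted key order, versions are checked against the transaction's snapshot id, and the surviving writes go to the SnapshotManager. A hedged usage sketch (injection boilerplate omitted; a TransactionManager-style wrapper would typically add retry logic around this):

var tx = jObjectManager.createTransaction();
try {
    // ... stage reads and writes on tx via the Transaction API ...
    var handle = jObjectManager.commit(tx); // commit() releases locks and closes tx itself
    handle.onFlush(() -> Log.trace("transaction flushed to storage"));
} catch (TxCommitException e) {
    // A serialization hazard was detected; callers typically retry with a fresh tx.
    throw e;
}

Note that rollback(tx) is the path for abandoning a transaction that was never handed to commit(); a failed commit() already unlocks and closes the transaction on its own.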
@@ -0,0 +1,26 @@
package com.usatiuk.dhfs.objects;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.utils.SerializationHelper;
import jakarta.enterprise.context.ApplicationScoped;

import java.nio.ByteBuffer;

@ApplicationScoped
public class JavaDataSerializer implements ObjectSerializer<JDataVersionedWrapper> {
    @Override
    public ByteString serialize(JDataVersionedWrapper obj) {
        ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
        buffer.putLong(obj.version());
        buffer.flip();
        return ByteString.copyFrom(buffer).concat(SerializationHelper.serialize(obj.data()));
    }

    @Override
    public JDataVersionedWrapper deserialize(ByteString data) {
        var version = data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
        var rawData = data.substring(Long.BYTES);
        return new JDataVersionedWrapperLazy(version, rawData);
    }
}
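The stored layout is an 8-byte version header followed by the Java-serialized object, which is what lets deserialize() hand back a lazy wrapper. A sketch of peeking at the version without touching the payload:

// Read only the fixed-size header; the payload stays unparsed.
long versionOf(ByteString stored) {
    return stored.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
}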
@@ -1,29 +1,16 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;

import java.util.NoSuchElementException;
import java.util.function.Function;

/**
 * A key-value iterator that filters keys based on a predicate.
 *
 * @param <K> the type of the keys
 * @param <V> the type of the values
 */
public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
    private final CloseableKvIterator<K, V> _backing;
    private final Function<K, Boolean> _filter;
    private K _next;

    /**
     * Constructs a KeyPredicateKvIterator with the specified backing iterator, start position, and filter.
     *
     * @param backing  the backing iterator
     * @param start    the starting position relative to the startKey
     * @param startKey the starting key
     * @param filter   the filter function to apply to keys. Only keys for which this function returns true will be included in the iteration.
     */
    public KeyPredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<K, Boolean> filter) {
        _goingForward = true;
        _backing = backing;
@@ -53,20 +40,20 @@ public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends Reversib
    }


//        switch (start) {
//            case LT -> {
////                assert _next == null || _next.getKey().compareTo(startKey) < 0;
//            }
//            case LE -> {
////                assert _next == null || _next.getKey().compareTo(startKey) <= 0;
//            }
//            case GT -> {
//                assert _next == null || _next.compareTo(startKey) > 0;
//            }
//            case GE -> {
//                assert _next == null || _next.compareTo(startKey) >= 0;
//            }
//        }
        switch (start) {
            case LT -> {
//                assert _next == null || _next.getKey().compareTo(startKey) < 0;
            }
            case LE -> {
//                assert _next == null || _next.getKey().compareTo(startKey) <= 0;
            }
            case GT -> {
                assert _next == null || _next.compareTo(startKey) > 0;
            }
            case GE -> {
                assert _next == null || _next.compareTo(startKey) >= 0;
            }
        }
    }

    private void fillNext() {
@@ -127,6 +114,11 @@ public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends Reversib
        return got;
    }

    @Override
    protected Class<?> peekTypeImpl() {
        return _goingForward ? _backing.peekNextType() : _backing.peekPrevType();
    }

    @Override
    public void close() {
        _backing.close();
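The predicate composes over any backing iterator; for example, restricting iteration to a key prefix (types from this diff, map contents assumed):

// Iterate only keys under "files/", starting from the first such key.
var it = new KeyPredicateKvIterator<>(
        new NavigableMapKvIterator<>(map, IteratorStart.GE, JObjectKey.of("files/")),
        IteratorStart.GE, JObjectKey.of("files/"),
        k -> k.name().startsWith("files/"));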
@@ -0,0 +1,14 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import com.usatiuk.dhfs.utils.DataLocker;
import jakarta.enterprise.context.ApplicationScoped;

@ApplicationScoped
public class LockManager {
    private final DataLocker _objLocker = new DataLocker();

    public AutoCloseableNoThrow lockObject(JObjectKey key) {
        return _objLocker.lock(key);
    }
}
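Because the handle is an AutoCloseableNoThrow, try-with-resources keeps lock scopes explicit:

// Hold the per-object lock only for the critical section.
try (var ignored = lockManager.lockObject(JObjectKey.of("some-object"))) {
    // read-modify-write the object safely here
}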
@@ -1,29 +1,19 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects;

import org.apache.commons.lang3.tuple.Pair;

import java.util.NoSuchElementException;
import java.util.function.Function;

/**
 * A mapping key-value iterator that transforms the values of a backing iterator using a specified function.
 *
 * @param <K>   the type of the keys
 * @param <V>   the type of the values in the backing iterator
 * @param <V_T> the type of the transformed values
 */
public class MappingKvIterator<K extends Comparable<K>, V, V_T> implements CloseableKvIterator<K, V_T> {
    private final CloseableKvIterator<K, V> _backing;
    private final Function<V, V_T> _transformer;
    private final Function<Class<?>, Class<?>> _classMapper;

    /**
     * Constructs a MappingKvIterator with the specified backing iterator and transformer function.
     *
     * @param backing     the backing iterator
     * @param transformer the function to transform values
     */
    public MappingKvIterator(CloseableKvIterator<K, V> backing, Function<V, V_T> transformer) {
    public MappingKvIterator(CloseableKvIterator<K, V> backing, Function<V, V_T> transformer, Function<Class<?>, Class<?>> classMapper) {
        _backing = backing;
        _transformer = transformer;
        _classMapper = classMapper;
    }

    @Override
@@ -31,6 +21,13 @@ public class MappingKvIterator<K extends Comparable<K>, V, V_T> implements Close
        return _backing.peekNextKey();
    }

    @Override
    public Class<?> peekNextType() {
        if (!hasNext())
            throw new NoSuchElementException();
        return _classMapper.apply(_backing.peekNextType());
    }

    @Override
    public void skip() {
        _backing.skip();
@@ -51,6 +48,13 @@ public class MappingKvIterator<K extends Comparable<K>, V, V_T> implements Close
        return _backing.peekPrevKey();
    }

    @Override
    public Class<?> peekPrevType() {
        if (!hasPrev())
            throw new NoSuchElementException();
        return _classMapper.apply(_backing.peekPrevType());
    }

    @Override
    public Pair<K, V_T> prev() {
        var got = _backing.prev();
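The new classMapper argument lets type peeking stay meaningful after values are transformed. For instance, unwrapping versioned wrappers while collapsing the reported type (a sketch using types from this diff; the backing iterator is assumed):

// Map stored wrappers to their payloads; report every element as plain JData.
var mapped = new MappingKvIterator<JObjectKey, JDataVersionedWrapper, JData>(
        backing, JDataVersionedWrapper::data, cls -> JData.class);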
@@ -0,0 +1,7 @@
package com.usatiuk.dhfs.objects;

import java.util.Optional;

public interface MaybeTombstone<T> {
    Optional<T> opt();
}
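Data<V> above is the value-carrying case of MaybeTombstone; the deletion case would simply report an empty Optional. A hypothetical counterpart (the real tombstone type is not shown in this diff):

// Hypothetical sketch: a key that exists but whose value was deleted.
public record Tombstone<V>() implements MaybeTombstone<V> {
    @Override
    public Optional<V> opt() {
        return Optional.empty();
    }
}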
@@ -0,0 +1,333 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;
import java.util.stream.Collectors;

public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
    private final NavigableMap<K, CloseableKvIterator<K, V>> _sortedIterators = new TreeMap<>();
    private final String _name;
    private Map<CloseableKvIterator<K, V>, Integer> _iterators;

    private final IteratorStart _initialStartType;
    private final K _initialStartKey;

    private interface FirstMatchState<K extends Comparable<K>, V> {
    }

    private record FirstMatchNone<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
    }

    private record FirstMatchFound<K extends Comparable<K>, V>(
            CloseableKvIterator<K, V> iterator) implements FirstMatchState<K, V> {
    }

    private record FirstMatchConsumed<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
    }

    // Fast path for the first element
    private FirstMatchState<K, V> _firstMatchState;
    private final List<IterProdFn<K, V>> _pendingIterators;

    public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
        _goingForward = true;
        _name = name;
        _initialStartType = startType;
        _initialStartKey = startKey;

        {
            int counter = 0;
            var iteratorsTmp = new HashMap<CloseableKvIterator<K, V>, Integer>();
            for (var iteratorFn : iterators) {
                var iterator = iteratorFn.get(startType, startKey);
                if ((counter == 0) // Not really a requirement, but simplifies some things for now
                        && (startType == IteratorStart.GE || startType == IteratorStart.LE)
                        && iterator.hasNext()
                        && iterator.peekNextKey().equals(startKey)) {
                    _firstMatchState = new FirstMatchFound<>(iterator);
                    _pendingIterators = iterators;
                    Log.tracev("{0} Created fast match: {1}", _name, _firstMatchState);
                    return;
                }
                iteratorsTmp.put(iterator, counter++);
            }
            _iterators = Map.copyOf(iteratorsTmp);
            _pendingIterators = null;
        }

        _firstMatchState = new FirstMatchNone<>();
        doInitialAdvance();
    }

    private void doInitialAdvance() {
        if (_initialStartType == IteratorStart.LT || _initialStartType == IteratorStart.LE) {
            // Starting at the greatest key less than (or equal to) the start key:
            // each iterator has given us its "greatest LT/LE key",
            // and we need to pick the greatest of those to start with.
            // If some iterators don't have a lesser key, we pick the smallest of the remaining keys instead.
            var found = _iterators.keySet().stream()
                    .filter(CloseableKvIterator::hasNext)
                    .map((i) -> {
                        var peeked = i.peekNextKey();
//                        Log.warnv("peeked: {0}, from {1}", peeked, i.getClass());
                        return peeked;
                    }).distinct().collect(Collectors.partitioningBy(e -> _initialStartType == IteratorStart.LE ? e.compareTo(_initialStartKey) <= 0 : e.compareTo(_initialStartKey) < 0));
            K initialMaxValue;
            if (!found.get(true).isEmpty())
                initialMaxValue = found.get(true).stream().max(Comparator.naturalOrder()).orElse(null);
            else
                initialMaxValue = found.get(false).stream().min(Comparator.naturalOrder()).orElse(null);

            for (var iterator : _iterators.keySet()) {
                while (iterator.hasNext() && iterator.peekNextKey().compareTo(initialMaxValue) < 0) {
                    iterator.skip();
                }
            }
        }

        for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
            advanceIterator(iterator);
        }

        Log.tracev("{0} Initialized: {1}", _name, _sortedIterators);
        switch (_initialStartType) {
//            case LT -> {
//                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
//            }
//            case LE -> {
//                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
//            }
            case GT -> {
                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(_initialStartKey) > 0;
            }
            case GE -> {
                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(_initialStartKey) >= 0;
            }
        }
    }

    private void doHydrate() {
        if (_firstMatchState instanceof FirstMatchNone) {
            return;
        }

        boolean consumed = _firstMatchState instanceof FirstMatchConsumed;
        if (_firstMatchState instanceof FirstMatchFound(CloseableKvIterator iterator)) {
            iterator.close();
        }

        _firstMatchState = new FirstMatchNone<>();

        {
            int counter = 0;
            var iteratorsTmp = new HashMap<CloseableKvIterator<K, V>, Integer>();
            for (var iteratorFn : _pendingIterators) {
                var iterator = iteratorFn.get(consumed ? IteratorStart.GT : IteratorStart.GE, _initialStartKey);
                iteratorsTmp.put(iterator, counter++);
            }
            _iterators = Map.copyOf(iteratorsTmp);
        }

        doInitialAdvance();
    }

    @SafeVarargs
    public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
        this(name, startType, startKey, List.of(iterators));
    }

    private void advanceIterator(CloseableKvIterator<K, V> iterator) {
        if (!iterator.hasNext()) {
            return;
        }

        K key = iterator.peekNextKey();
        Log.tracev("{0} Advance peeked: {1}-{2}", _name, iterator, key);
        if (!_sortedIterators.containsKey(key)) {
            _sortedIterators.put(key, iterator);
            return;
        }

        // Expects that a reversed iterator returns itself when reversed again
        var oursPrio = _iterators.get(_goingForward ? iterator : iterator.reversed());
        var them = _sortedIterators.get(key);
        var theirsPrio = _iterators.get(_goingForward ? them : them.reversed());
        if (oursPrio < theirsPrio) {
            _sortedIterators.put(key, iterator);
            advanceIterator(them);
        } else {
            Log.tracev("{0} Skipped: {1}", _name, iterator.peekNextKey());
            iterator.skip();
            advanceIterator(iterator);
        }
    }

    @Override
    protected void reverse() {
        switch (_firstMatchState) {
            case FirstMatchFound<K, V> firstMatchFound -> {
                doHydrate();
            }
            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
                doHydrate();
            }
            default -> {
            }
        }

        var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
        Log.tracev("{0} Reversing from {1}", _name, cur);
        _goingForward = !_goingForward;
        _sortedIterators.clear();
        for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
            // _goingForward inverted already
            advanceIterator(!_goingForward ? iterator.reversed() : iterator);
        }
        if (_sortedIterators.isEmpty() || cur == null) {
            return;
        }
        // Advance to the expected key, as we might have brought back some iterators
        // that were at their ends
        while (!_sortedIterators.isEmpty()
                && ((_goingForward && peekImpl().compareTo(cur.getKey()) <= 0)
                || (!_goingForward && peekImpl().compareTo(cur.getKey()) >= 0))) {
            skipImpl();
        }
        Log.tracev("{0} Reversed to {1}", _name, _sortedIterators);
    }

    @Override
    protected K peekImpl() {
        switch (_firstMatchState) {
            case FirstMatchFound<K, V> firstMatchFound -> {
                return firstMatchFound.iterator.peekNextKey();
            }
            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
                doHydrate();
                break;
            }
            default -> {
            }
        }

        if (_sortedIterators.isEmpty())
            throw new NoSuchElementException();
        return _goingForward ? _sortedIterators.firstKey() : _sortedIterators.lastKey();
    }

    @Override
    protected void skipImpl() {
        switch (_firstMatchState) {
            case FirstMatchFound<K, V> firstMatchFound -> {
                var curVal = firstMatchFound.iterator.next();
                firstMatchFound.iterator.close();
                _firstMatchState = new FirstMatchConsumed<>();
//                Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, firstMatchFound.iterator, curVal, _sortedIterators.keySet());
                return;
            }
            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
                doHydrate();
                break;
            }
            default -> {
            }
        }

        var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
        if (cur == null) {
            throw new NoSuchElementException();
        }
        cur.getValue().skip();
        advanceIterator(cur.getValue());
        Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators);
    }

    @Override
    protected boolean hasImpl() {
        switch (_firstMatchState) {
            case FirstMatchFound<K, V> firstMatchFound -> {
                return true;
            }
            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
                doHydrate();
                break;
            }
            default -> {
            }
        }
        return !_sortedIterators.isEmpty();
    }

    @Override
    protected Pair<K, V> nextImpl() {
        switch (_firstMatchState) {
            case FirstMatchFound<K, V> firstMatchFound -> {
                var curVal = firstMatchFound.iterator.next();
                firstMatchFound.iterator.close();
                _firstMatchState = new FirstMatchConsumed<>();
//                Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, firstMatchFound.iterator, curVal, _sortedIterators.keySet());
                return curVal;
            }
            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
                doHydrate();
                break;
            }
            default -> {
            }
        }

        var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
        if (cur == null) {
            throw new NoSuchElementException();
        }
        var curVal = cur.getValue().next();
        advanceIterator(cur.getValue());
//        Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, cur.getValue(), curVal, _sortedIterators.keySet());
        return curVal;
    }

    @Override
    protected Class<?> peekTypeImpl() {
        switch (_firstMatchState) {
            case FirstMatchFound<K, V> firstMatchFound -> {
                return firstMatchFound.iterator().peekNextType();
            }
            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
                doHydrate();
                break;
            }
            default -> {
            }
        }

        if (_sortedIterators.isEmpty())
            throw new NoSuchElementException();

        return _goingForward
                ? _sortedIterators.firstEntry().getValue().peekNextType()
                : _sortedIterators.lastEntry().getValue().peekNextType();
    }


    @Override
    public void close() {
        if (_firstMatchState instanceof FirstMatchFound(CloseableKvIterator iterator)) {
            iterator.close();
        }
        // _iterators is not populated on the fast path until hydration
        if (_iterators != null) {
            for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
                iterator.close();
            }
        }
    }

    @Override
    public String toString() {
        return "MergingKvIterator{" +
                "_name='" + _name + '\'' +
                ", _sortedIterators=" + _sortedIterators.keySet() +
                ", _iterators=" + _iterators +
                '}';
    }
}
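Priority follows list order: the earlier an IterProdFn appears, the more it shadows later sources on equal keys, and the FirstMatchFound/FirstMatchConsumed fast path avoids opening the remaining sources when the first one already starts exactly at the requested key. A sketch merging a pending-write buffer over committed state (map names hypothetical):

// Pending writes are passed first, so they win over committed data on equal keys.
var merged = new MergingKvIterator<JObjectKey, JDataVersionedWrapper>(
        "read", IteratorStart.GE, JObjectKey.of(""),
        (start, key) -> new NavigableMapKvIterator<>(pendingWrites, start, key),
        (start, key) -> new NavigableMapKvIterator<>(committed, start, key));
while (merged.hasNext()) {
    var entry = merged.next(); // Pair<JObjectKey, JDataVersionedWrapper> from the winning source
}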
@@ -1,44 +1,31 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;

/**
 * A key-value iterator for a {@link NavigableMap}.
 * It allows iterating over the keys and values in a sorted order.
 *
 * @param <K> the type of the keys
 * @param <V> the type of the values
 */
public class NavigableMapKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
    private final NavigableMap<K, V> _map;
    private Iterator<Map.Entry<K, V>> _iterator;
    private Map.Entry<K, V> _next;

    /**
     * Constructs a NavigableMapKvIterator with the specified map, start type, and start key.
     *
     * @param map   the map to iterate over
     * @param start the starting position relative to the startKey
     * @param key   the starting key
     */
    public NavigableMapKvIterator(NavigableMap<K, ? extends V> map, IteratorStart start, K key) {
        _map = (NavigableMap<K, V>) map;
    public NavigableMapKvIterator(NavigableMap<K, V> map, IteratorStart start, K key) {
        _map = map;
        SortedMap<K, V> _view;
        _goingForward = true;
        switch (start) {
            case GE -> _view = _map.tailMap(key, true);
            case GT -> _view = _map.tailMap(key, false);
            case GE -> _view = map.tailMap(key, true);
            case GT -> _view = map.tailMap(key, false);
            case LE -> {
                var floorKey = _map.floorKey(key);
                var floorKey = map.floorKey(key);
                if (floorKey == null) _view = _map;
                else _view = _map.tailMap(floorKey, true);
                else _view = map.tailMap(floorKey, true);
            }
            case LT -> {
                var lowerKey = map.lowerKey(key);
                if (lowerKey == null) _view = _map;
                else _view = _map.tailMap(lowerKey, true);
                else _view = map.tailMap(lowerKey, true);
            }
            default -> throw new IllegalArgumentException("Unknown start type");
        }
@@ -104,6 +91,13 @@ public class NavigableMapKvIterator<K extends Comparable<K>, V> extends Reversib
        return Pair.of(ret);
    }

    @Override
    protected Class<? extends V> peekTypeImpl() {
        if (_next == null)
            throw new NoSuchElementException("No more elements");
        return (Class<? extends V>) _next.getValue().getClass();
    }

    @Override
    public void close() {
    }
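The LE/LT branches seed the view at the floor/lower key so iteration can begin one step before the requested key:

var map = new TreeMap<JObjectKey, String>();
map.put(JObjectKey.of("a"), "1");
map.put(JObjectKey.of("c"), "2");
// LE "b" starts at "a" (the floor key); GE "b" would start at "c".
var it = new NavigableMapKvIterator<>(map, IteratorStart.LE, JObjectKey.of("b"));
assert it.peekNextKey().equals(JObjectKey.of("a"));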
@@ -0,0 +1,9 @@
package com.usatiuk.dhfs.objects;

import com.google.protobuf.ByteString;

public interface ObjectSerializer<T> {
    ByteString serialize(T obj);

    T deserialize(ByteString data);
}
@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.objects;

public record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry {
}
@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.objects;

public record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry {
}
@@ -0,0 +1,5 @@
package com.usatiuk.dhfs.objects;

public interface PendingWriteEntry {
    long bundleId();
}
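PendingWrite and PendingDelete are the two entry kinds of the writeback log; a consumer would typically dispatch on the concrete type (sketch only; the store/delete helpers are hypothetical):

// Hypothetical consumer of pending writeback entries.
void apply(PendingWriteEntry entry) {
    switch (entry) {
        case PendingWrite w -> store(w.data());   // hypothetical store()
        case PendingDelete d -> delete(d.key());  // hypothetical delete()
        default -> throw new IllegalStateException("Unknown entry: " + entry);
    }
}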
@@ -1,7 +1,4 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
package com.usatiuk.dhfs.objects;

public interface PreCommitTxHook {
    default void onChange(JObjectKey key, JData old, JData cur) {
Some files were not shown because too many files have changed in this diff.