Mirror of https://github.com/usatiuk/dhfs.git (synced 2025-10-28 20:47:49 +01:00)

Compare commits: de211bb2d2...gpt-rocks (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | edf1ae85f5 |  |
|  | b42461f188 |  |
.github/workflows/server.yml (vendored, 142 changed lines)
@@ -20,21 +20,26 @@ jobs:
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: "recursive"

      - name: Install sudo for ACT
        run: apt-get update && apt-get install -y sudo
        if: env.ACT=='true'

      - name: Install FUSE
        run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
      - name: Install fuse and maven
        run: sudo apt-get update && sudo apt-get install -y libfuse2

      - name: User allow other for fuse
        run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf
      - name: Download maven
        run: |
          cd "$HOME"
          mkdir maven-bin
          curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
          echo "$HOME"/maven-bin/bin >> $GITHUB_PATH

      - name: Dump fuse.conf
        run: cat /etc/fuse.conf
      - name: Maven info
        run: |
          echo $GITHUB_PATH
          echo $PATH
          mvn -v

      - name: Set up JDK 21
        uses: actions/setup-java@v4
@@ -43,9 +48,6 @@ jobs:
          distribution: "zulu"
          cache: maven

      - name: Build LazyFS
        run: cd thirdparty/lazyfs/ && ./build.sh

      - name: Test with Maven
        run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify
@@ -55,7 +57,7 @@ jobs:
      - uses: actions/upload-artifact@v4
        with:
          name: DHFS Server Package
          path: dhfs-parent/dhfs-fuse/target/quarkus-app
          path: dhfs-parent/server/target/quarkus-app

      - uses: actions/upload-artifact@v4
        if: ${{ always() }}
@@ -87,6 +89,102 @@ jobs:
          name: Webui
          path: webui/dist

  build-native-libs:
    strategy:
      matrix:
        include:
          - os: ubuntu-latest
            cross: "linux/amd64"
          - os: ubuntu-latest
            cross: "linux/arm64"
          - os: macos-latest

    runs-on: ${{ matrix.os }}
    env:
      DO_LOCAL_BUILD: ${{ matrix.os == 'macos-latest' }}
      DOCKER_PLATFORM: ${{ matrix.cross || 'NATIVE' }}

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set SANITIZED_DOCKER_PLATFORM
        run: echo "SANITIZED_DOCKER_PLATFORM=$(echo $DOCKER_PLATFORM | tr / _ )" >> $GITHUB_ENV

      - name: Set DOCKER_BUILDER_IMAGE
        run: echo "DOCKER_BUILDER_IMAGE=dhfs_lib_builder-${{matrix.os}}-$SANITIZED_DOCKER_PLATFORM" >> $GITHUB_ENV

      - name: Build config
        run: |
          echo DO_LOCAL_BUILD: $DO_LOCAL_BUILD
          echo DOCKER_PLATFORM: $DOCKER_PLATFORM
          echo SANITIZED_DOCKER_PLATFORM: $SANITIZED_DOCKER_PLATFORM
          echo DOCKER_BUILDER_IMAGE: $DOCKER_BUILDER_IMAGE

      - name: Set up JDK 21
        if: ${{ env.DO_LOCAL_BUILD == 'TRUE' }}
        uses: actions/setup-java@v4
        with:
          java-version: "21"
          distribution: "zulu"
          cache: maven

      - name: Set up Docker Buildx
        if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
        uses: docker/setup-buildx-action@v3

      - name: Set up QEMU
        if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
        uses: docker/setup-qemu-action@v3

      - name: Build Docker builder image
        if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
        uses: docker/build-push-action@v5
        with:
          context: ./libdhfs_support/builder
          file: ./libdhfs_support/builder/Dockerfile
          push: false
          platforms: ${{ env.DOCKER_PLATFORM }}
          tags: ${{ env.DOCKER_BUILDER_IMAGE }}
          cache-from: type=gha,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
          cache-to: type=gha,mode=max,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
          load: true

      - name: Build the library
        run: |
          CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" libdhfs_support/builder/cross-build.sh both build "$(pwd)/result"

      - name: Upload build
        uses: actions/upload-artifact@v4
        with:
          name: NativeLib-${{ matrix.os }}-${{ env.SANITIZED_DOCKER_PLATFORM }}
          path: result

  merge-native-libs:
    runs-on: ubuntu-latest
    needs: [build-native-libs]
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Download artifacts
        uses: actions/download-artifact@v4
        with:
          path: downloaded-libs

      - name: Merge all
        run: rsync -av downloaded-libs/NativeLib*/* result/

      - name: Check that libs exists
        run: |
          test -f "result/Linux-x86_64/libdhfs_support.so" || exit 1

      - name: Upload
        uses: actions/upload-artifact@v4
        with:
          name: NativeLibs
          path: result

  publish-docker:
    runs-on: ubuntu-latest
    permissions:
@@ -96,7 +194,7 @@
      # with sigstore/fulcio when running outside of PRs.
      id-token: write

    needs: [build-webui, build-dhfs]
    needs: [build-webui, merge-native-libs, build-dhfs]

    steps:
      - name: Checkout repository

@@ -114,6 +212,12 @@
          name: Webui
          path: webui-dist-downloaded

      - name: Download native libs
        uses: actions/download-artifact@v4
        with:
          name: NativeLibs
          path: dhfs-native-downloaded

      - name: Show all the files
        run: find .
@@ -189,7 +293,7 @@
      # with sigstore/fulcio when running outside of PRs.
      id-token: write

    needs: [build-webui, build-dhfs]
    needs: [build-webui, merge-native-libs, build-dhfs]

    steps:
      - name: Checkout repository

@@ -205,6 +309,11 @@
          name: Webui
          path: webui-dist-downloaded

      - uses: actions/download-artifact@v4
        with:
          name: NativeLibs
          path: dhfs-native-downloaded

      - name: Show all the files
        run: find .

@@ -212,11 +321,14 @@
        run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app

      - name: Copy DHFS
        run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"
        run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"

      - name: Copy Webui
        run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"

      - name: Copy Webui
        run: cp -r ./dhfs-native-downloaded "run-wrapper-out/dhfs/app/NativeLibs"

      - name: Copy run wrapper
        run: cp -r ./run-wrapper/* "run-wrapper-out/dhfs/app/"
.gitmodules (vendored, 3 changed lines)
@@ -1,3 +0,0 @@
[submodule "thirdparty/lazyfs/lazyfs"]
    path = thirdparty/lazyfs/lazyfs
    url = git@github.com:dsrhaslab/lazyfs.git
@@ -9,6 +9,8 @@ COPY ./dhfs-package-downloaded/*.jar .
COPY ./dhfs-package-downloaded/app .
COPY ./dhfs-package-downloaded/quarkus .

WORKDIR /usr/src/app/native-libs
COPY ./dhfs-native-downloaded/. .

WORKDIR /usr/src/app/webui
COPY ./webui-dist-downloaded/. .
@@ -14,9 +14,6 @@ Syncthing and allowing you to stream your files like Google Drive File Stream

This is a simple wrapper around the jar/web ui distribution that allows you to run/stop
the DHFS server in the background, and update itself (hopefully!)

## How to use it?
## How to use it and how it works?

Unpack the run-wrapper and run the `run` script. The filesystem should be mounted to the `fuse` folder in the run-wrapper root directory.

Then, a web interface will be available at `localhost:8080`, which can be used to connect with other peers.
TODO 😁
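For concreteness, a minimal session sketch (the archive name is an assumption; the `run` script, the `fuse` folder, and port 8080 come from the text above):

```sh
# Unpack the run-wrapper (archive name is hypothetical) and start the server.
tar xf run-wrapper.tar.gz
cd run-wrapper
./run            # runs the DHFS server in the background

ls fuse          # the filesystem is mounted to the `fuse` folder
# The web interface is then available at http://localhost:8080
```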
@@ -1,16 +1,17 @@
<component name="ProjectRunConfigurationManager">
    <configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
        <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
        <module name="dhfs-fuse" />
        <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
        <extension name="coverage">
            <pattern>
                <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
                <option name="ENABLED" value="true" />
            </pattern>
        </extension>
        <method v="2">
            <option name="Make" enabled="true" />
        </method>
    </configuration>
    <configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
        <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main"/>
        <module name="server"/>
        <option name="VM_PARAMETERS"
                value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011"/>
        <extension name="coverage">
            <pattern>
                <option name="PATTERN" value="com.usatiuk.dhfs.*"/>
                <option name="ENABLED" value="true"/>
            </pattern>
        </extension>
        <method v="2">
            <option name="Make" enabled="true"/>
        </method>
    </configuration>
</component>
@@ -1,16 +1,18 @@
<component name="ProjectRunConfigurationManager">
    <configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
        <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
        <module name="dhfs-fuse" />
        <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
        <extension name="coverage">
            <pattern>
                <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
                <option name="ENABLED" value="true" />
            </pattern>
        </extension>
        <method v="2">
            <option name="Make" enabled="true" />
        </method>
    </configuration>
    <configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication"
                   nameIsGenerated="true">
        <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main"/>
        <module name="server"/>
        <option name="VM_PARAMETERS"
                value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021"/>
        <extension name="coverage">
            <pattern>
                <option name="PATTERN" value="com.usatiuk.dhfs.*"/>
                <option name="ENABLED" value="true"/>
            </pattern>
        </extension>
        <method v="2">
            <option name="Make" enabled="true"/>
        </method>
    </configuration>
</component>
dhfs-parent/autoprotomap/deployment/pom.xml (new file, 60 lines)
@@ -0,0 +1,60 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk</groupId>
        <artifactId>autoprotomap-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <artifactId>autoprotomap-deployment</artifactId>
    <name>Autoprotomap - Deployment</name>

    <dependencies>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc-deployment</artifactId>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5-internal</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc-deployment</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <executions>
                    <execution>
                        <id>default-compile</id>
                        <configuration>
                            <annotationProcessorPaths>
                                <path>
                                    <groupId>io.quarkus</groupId>
                                    <artifactId>quarkus-extension-processor</artifactId>
                                    <version>${quarkus.platform.version}</version>
                                </path>
                            </annotationProcessorPaths>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
@@ -0,0 +1,78 @@
package com.usatiuk.autoprotomap.deployment;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.arc.deployment.GeneratedBeanBuildItem;
import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.ApplicationIndexBuildItem;
import io.quarkus.gizmo.ClassCreator;
import io.quarkus.gizmo.SignatureBuilder;
import jakarta.inject.Singleton;
import org.jboss.jandex.ClassType;
import org.jboss.jandex.Type;

class AutoprotomapProcessor {
    @BuildStep
    ProtoIndexBuildItem index(ApplicationIndexBuildItem jandex) {
        var ret = new ProtoIndexBuildItem();
        var annot = jandex.getIndex().getAnnotations(ProtoMirror.class);
        for (var a : annot) {
            var protoTarget = jandex.getIndex().getClassByName(((ClassType) a.value().value()).name());
//            if (!messageImplementors.contains(protoTarget))
//                throw new IllegalArgumentException("Expected " + protoTarget + " to be a proto message");
            System.out.println("Found: " + a.name().toString() + " at " + protoTarget.name().toString() + " of " + a.target().asClass().name().toString());
            ret.protoMsgToObj.put(protoTarget, a.target().asClass());
        }
        return ret;
    }

    @BuildStep
    void generateProtoSerializer(ApplicationIndexBuildItem jandex,
                                 ProtoIndexBuildItem protoIndex,
                                 BuildProducer<GeneratedBeanBuildItem> generatedClasses) {
        try {
            for (var o : protoIndex.protoMsgToObj.entrySet()) {
                System.out.println("Generating " + o.getKey().toString() + " -> " + o.getValue().toString());
                var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedClasses);

                var msgType = io.quarkus.gizmo.Type.classType(o.getKey().name());
                var objType = io.quarkus.gizmo.Type.classType(o.getValue().name());

                var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
                        io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
                        msgType, objType);

                var msgJType = Type.create(o.getKey().name(), Type.Kind.CLASS);
                var objJType = Type.create(o.getValue().name(), Type.Kind.CLASS);

                try (ClassCreator classCreator = ClassCreator.builder()
                        .className("com.usatiuk.autoprotomap.generated.for" + o.getKey().simpleName())
                        .signature(SignatureBuilder.forClass().addInterface(type))
                        .classOutput(gizmoAdapter)
                        .setFinal(true)
                        .build()) {
                    classCreator.addAnnotation(Singleton.class);

                    var generator = new ProtoSerializerGenerator(
                            jandex.getIndex(),
                            protoIndex,
                            classCreator,
                            msgJType,
                            objJType
                    );

                    generator.generate();
                }
            }
        } catch (Throwable e) {
            StringBuilder sb = new StringBuilder();
            sb.append(e + "\n");
            for (var el : e.getStackTrace()) {
                sb.append(el.toString() + "\n");
            }
            System.out.println(sb);
        }
    }
}
@@ -0,0 +1,18 @@
package com.usatiuk.autoprotomap.deployment;

public class Constants {
    public static final String FIELD_PREFIX = "_";

    public static String capitalize(String str) {
        return str.substring(0, 1).toUpperCase() + str.substring(1);
    }

    public static String stripPrefix(String str, String prefix) {
        if (str.startsWith(prefix)) {
            return str.substring(prefix.length());
        }
        return str;
    }
}
@@ -0,0 +1,6 @@
package com.usatiuk.autoprotomap.deployment;

@FunctionalInterface
public interface Effect {
    void apply();
}
@@ -0,0 +1,10 @@
package com.usatiuk.autoprotomap.deployment;

import io.quarkus.builder.item.SimpleBuildItem;
import org.apache.commons.collections4.BidiMap;
import org.apache.commons.collections4.bidimap.DualHashBidiMap;
import org.jboss.jandex.ClassInfo;

public final class ProtoIndexBuildItem extends SimpleBuildItem {
    BidiMap<ClassInfo, ClassInfo> protoMsgToObj = new DualHashBidiMap<>();
}
@@ -0,0 +1,342 @@
package com.usatiuk.autoprotomap.deployment;

import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.gizmo.*;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.jboss.jandex.*;
import org.jboss.jandex.Type;
import org.objectweb.asm.Opcodes;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static com.usatiuk.autoprotomap.deployment.Constants.*;

public class ProtoSerializerGenerator {
    private final Index index;
    private final ProtoIndexBuildItem protoIndex;
    private final ClassCreator classCreator;
    private final HashSet<Pair<ClassInfo, ClassInfo>> externalSerializers = new HashSet<>();
    private final Type topMessageType;
    private final Type topObjectType;

    public ProtoSerializerGenerator(Index index, ProtoIndexBuildItem protoIndex, ClassCreator classCreator, Type topMessageType, Type topObjectType) {
        this.index = index;
        this.protoIndex = protoIndex;
        this.classCreator = classCreator;
        this.topMessageType = topMessageType;
        this.topObjectType = topObjectType;
    }

    private FieldDescriptor getOutsideSerializer(ClassInfo messageClass, ClassInfo objectClass) {
        var name = messageClass.name().withoutPackagePrefix() + objectClass.name().withoutPackagePrefix() + "serializer";
        var msgType = io.quarkus.gizmo.Type.classType(messageClass.name());
        var objType = io.quarkus.gizmo.Type.classType(objectClass.name());
        var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
                io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
                msgType, objType);
        var sig = SignatureBuilder.forField().setType(type).build();
        var fd = FieldDescriptor.of(classCreator.getClassName(), name, ProtoSerializer.class);
        if (externalSerializers.add(Pair.of(messageClass, objectClass))) {
            var fc = classCreator.getFieldCreator(fd);
            fc.addAnnotation(Inject.class);
            fc.setSignature(sig);
            fc.setModifiers(Opcodes.ACC_PUBLIC);
        }
        return fd;
    }

    private void traverseHierarchy(Index index, ClassInfo klass, Consumer<ClassInfo> visitor) {
        var cur = klass;
        while (true) {
            visitor.accept(cur);

            var next = cur.superClassType().name();
            if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break;
            cur = index.getClassByName(next);
        }
    }

    private ArrayList<FieldInfo> findAllFields(Index index, ClassInfo klass) {
        ArrayList<FieldInfo> ret = new ArrayList<>();
        traverseHierarchy(index, klass, cur -> {
            ret.addAll(cur.fields());
        });
        return ret;
    }

    private void generateBuilderUse(BytecodeCreator bytecodeCreator,
                                    ResultHandle builder,
                                    Type messageType, Type objectType,
                                    ResultHandle object) {
        var builderType = Type.create(DotName.createComponentized(messageType.name(), "Builder", true), Type.Kind.CLASS);

        var objectClass = index.getClassByName(objectType.name().toString());

        Function<String, String> getterGetter = objectClass.isRecord()
                ? Function.identity()
                : s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX));

        for (var f : findAllFields(index, objectClass)) {
            var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX);

            Supplier<ResultHandle> get = () -> {
                if ((f.flags() & Opcodes.ACC_PUBLIC) != 0)
                    return bytecodeCreator.readInstanceField(f, object);
                else {
                    var fieldGetter = getterGetter.apply(f.name());
                    return bytecodeCreator.invokeVirtualMethod(
                            MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object);
                }
            };

            Effect doSimpleCopy = () -> {
                var setter = MethodDescriptor.ofMethod(builderType.name().toString(), "set" + capitalize(consideredFieldName),
                        builderType.name().toString(), f.type().toString());

                var val = get.get();
                bytecodeCreator.invokeVirtualMethod(setter, builder, val);
            };

            switch (f.type().kind()) {
                case CLASS -> {
                    if (f.type().equals(Type.create(String.class)) || f.type().equals(Type.create(ByteString.class))) {
                        doSimpleCopy.apply();
                    } else {
                        var builderGetter = "get" + capitalize(f.name()) + "Builder";
                        var protoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(f.type().name()));
                        var nestedBuilderType = Type.create(DotName.createComponentized(protoType.name(), "Builder", true), Type.Kind.CLASS);
                        var nestedBuilder = bytecodeCreator.invokeVirtualMethod(
                                MethodDescriptor.ofMethod(builderType.toString(), builderGetter, nestedBuilderType.name().toString()), builder);

                        var val = get.get();

                        generateBuilderUse(bytecodeCreator, nestedBuilder, Type.create(protoType.name(), Type.Kind.CLASS), f.type(), val);
                    }
                }
                case PRIMITIVE -> {
                    doSimpleCopy.apply();
                }
                case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
                case PARAMETERIZED_TYPE ->
                        throw new UnsupportedOperationException("Parametrized types not supported yet");
                case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
                default -> throw new IllegalStateException("Unexpected type: " + f.type());
            }
        }
    }

    private ResultHandle generateConstructorUse(
            BytecodeCreator bytecodeCreator,
            ClassCreator classCreator,
            Type messageType, Type objectType,
            ResultHandle message
    ) {
        var constructor = findAllArgsConstructor(index, index.getClassByName(objectType.name()));
        if (constructor == null) {
            throw new IllegalStateException("No constructor found for type: " + objectType.name());
        }
        var argMap = new ResultHandle[constructor.parametersCount()];

        for (int i = 0; i < argMap.length; i++) {
            var type = constructor.parameterType(i);
            var strippedName = stripPrefix(constructor.parameterName(i), FIELD_PREFIX);

            IntConsumer doSimpleCopy = (arg) -> {
                var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
                        type.name().toString());
                argMap[arg] = bytecodeCreator.invokeVirtualMethod(call, message);
            };

            switch (type.kind()) {
                case CLASS -> {
                    if (type.equals(Type.create(String.class)) || type.equals(Type.create(ByteString.class))) {
                        doSimpleCopy.accept(i);
                    } else {
                        var nestedProtoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(type.name()));
                        var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
                                nestedProtoType.name().toString());
                        var nested = bytecodeCreator.invokeVirtualMethod(call, message);
                        argMap[i] = generateConstructorUse(bytecodeCreator, classCreator, Type.create(nestedProtoType.name(), Type.Kind.CLASS), type, nested);
                    }
                }
                case PRIMITIVE -> {
                    doSimpleCopy.accept(i);
                }
                case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
                case PARAMETERIZED_TYPE ->
                        throw new UnsupportedOperationException("Parametrized types not supported yet");
                case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
                default -> throw new IllegalStateException("Unexpected type: " + type);
            }
        }

        return bytecodeCreator.newInstance(constructor, argMap);
    }

    private MethodInfo findAllArgsConstructor(Index index, ClassInfo klass) {
        ArrayList<FieldInfo> fields = findAllFields(index, klass);

        var fieldCount = fields.size();
        var fieldNames = fields.stream().map(f -> stripPrefix(f.name(), FIELD_PREFIX)).sorted().toList();
        var fieldNameToType = fields.stream().collect(Collectors.toMap(f -> stripPrefix(f.name(), FIELD_PREFIX), FieldInfo::type));

        for (var m : klass.constructors()) {
            if (m.parametersCount() != fieldCount) continue;
            var parameterNames = m.parameters().stream().map(n -> stripPrefix(n.name(), FIELD_PREFIX)).sorted().toList();
            if (!Objects.equals(fieldNames, parameterNames)) continue;

            for (var p : m.parameters()) {
                if (!Objects.equals(fieldNameToType.get(stripPrefix(p.name(), FIELD_PREFIX)), p.type())) continue;
            }

            return m;
        }

        return null;
    }

    public void generateAbstract() {
        var kids = Stream.concat(index.getAllKnownSubclasses(topObjectType.name()).stream(),
                        index.getAllKnownImplementors(topObjectType.name()).stream())
                .filter(k -> !k.isAbstract() && !k.isInterface()).toList();

        try (MethodCreator method = classCreator.getMethodCreator("serialize",
                Message.class, Object.class)) {

            method.setModifiers(Opcodes.ACC_PUBLIC);

            var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);

            var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));

            var arg = method.getMethodParam(0);

            for (var nestedObjClass : kids) {
                System.out.println("Generating " + nestedObjClass.name() + " serializer for " + topObjectType.name());
                var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
                var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
                boolean doExternalCall = false;
                if (nestedMessageClass == null) {
                    var msgInfo = index.getClassByName(topMessageType.name());
                    nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
                    doExternalCall = true;
                }
                var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);

                var statement = method.ifTrue(method.instanceOf(arg, nestedObjClass.name().toString()));

                try (var branch = statement.trueBranch()) {
                    if (doExternalCall) {
                        var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
                        var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
                        var serialized = branch.invokeInterfaceMethod(
                                MethodDescriptor.ofMethod(ProtoSerializer.class,
                                        "serialize", Message.class, Object.class),
                                serializerLoaded, arg);
                        branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
                                "set" + capitalize(nestedObjType.name().withoutPackagePrefix()),
                                builderType.name().toString(), nestedMessageType.name().toString()), builder, serialized);
                    } else {
                        var nestedBuilderType = Type.create(DotName.createComponentized(nestedMessageType.name(), "Builder", true), Type.Kind.CLASS);
                        var nestedBuilder = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
                                "get" + capitalize(nestedObjType.name().withoutPackagePrefix()) + "Builder",
                                nestedBuilderType.name().toString()), builder);
                        generateBuilderUse(branch, nestedBuilder, nestedMessageType, nestedObjType, arg);
                    }
                    var result = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
                    branch.returnValue(result);
                }
            }
            method.throwException(IllegalArgumentException.class, "Unknown object type");
        }

        try (MethodCreator method = classCreator.getMethodCreator("deserialize",
                Object.class, Message.class)) {
            method.setModifiers(Opcodes.ACC_PUBLIC);
            var arg = method.getMethodParam(0);

            for (var nestedObjClass : kids) {
                System.out.println("Generating " + nestedObjClass.name() + " deserializer for " + topObjectType.name());
                var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);

                var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
                boolean doExternalCall = false;
                if (nestedMessageClass == null) {
                    var msgInfo = index.getClassByName(topMessageType.name());
                    nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
                    doExternalCall = true;
                }

                var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);

                var typeCheck = method.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
                        "has" + capitalize(nestedObjType.name().withoutPackagePrefix()), boolean.class), arg);

                var statement = method.ifTrue(typeCheck);

                try (var branch = statement.trueBranch()) {
                    var nestedMessage = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
                            "get" + capitalize(nestedObjType.name().withoutPackagePrefix()), nestedMessageType.name().toString()), arg);
                    if (doExternalCall) {
                        var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
                        var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
                        branch.returnValue(branch.invokeInterfaceMethod(
                                MethodDescriptor.ofMethod(ProtoSerializer.class,
                                        "deserialize", Object.class, Message.class),
                                serializerLoaded, nestedMessage));
                    } else {
                        branch.returnValue(generateConstructorUse(branch, classCreator, nestedMessageType, nestedObjType, nestedMessage));
                    }
                }
            }
            method.throwException(IllegalArgumentException.class, "Unknown object type");
        }
    }

    public void generate() {
        var objInfo = index.getClassByName(topObjectType.name());
        if (objInfo.isAbstract() || objInfo.isInterface()) {
            generateAbstract();
            return;
        }

        try (MethodCreator method = classCreator.getMethodCreator("serialize",
                Message.class, Object.class)) {

            method.setModifiers(Opcodes.ACC_PUBLIC);

            var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);

            var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));

            var arg = method.getMethodParam(0);

            generateBuilderUse(method, builder, topMessageType, topObjectType, arg);

            var result = method.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);

            method.returnValue(result);
        }

        try (MethodCreator method = classCreator.getMethodCreator("deserialize",
                Object.class, Message.class)) {
            method.setModifiers(Opcodes.ACC_PUBLIC);

            var arg = method.getMethodParam(0);

            method.returnValue(generateConstructorUse(method, classCreator, topMessageType, topObjectType, arg));
        }
    }
}
@@ -0,0 +1,22 @@
package com.usatiuk.autoprotomap.test;

import io.quarkus.test.QuarkusDevModeTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

public class AutoprotomapDevModeTest {

    // Start hot reload (DevMode) test with your extension loaded
    @RegisterExtension
    static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest()
            .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));

    @Test
    public void writeYourOwnDevModeTest() {
        // Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information
        Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName());
    }
}
@@ -0,0 +1,22 @@
package com.usatiuk.autoprotomap.test;

import io.quarkus.test.QuarkusUnitTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

public class AutoprotomapTest {

    // Start unit test with your extension loaded
    @RegisterExtension
    static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
            .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));

    @Test
    public void writeYourOwnUnitTest() {
        // Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information
        Assertions.assertTrue(true, "Add some assertions to " + getClass().getName());
    }
}
dhfs-parent/autoprotomap/integration-tests/pom.xml (new file, 107 lines)
@@ -0,0 +1,107 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk</groupId>
        <artifactId>autoprotomap-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <artifactId>autoprotomap-integration-tests</artifactId>
    <name>Autoprotomap - Integration Tests</name>

    <properties>
        <skipITs>true</skipITs>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap-deployment</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>io.quarkus</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
                <executions>
                    <execution>
                        <goals>
                            <goal>build</goal>
                            <goal>generate-code</goal>
                            <goal>generate-code-tests</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <artifactId>maven-failsafe-plugin</artifactId>
                <executions>
                    <execution>
                        <goals>
                            <goal>integration-test</goal>
                            <goal>verify</goal>
                        </goals>
                    </execution>
                </executions>
                <configuration>
                    <systemPropertyVariables>
                        <native.image.path>${project.build.directory}/${project.build.finalName}-runner</native.image.path>
                        <java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
                        <maven.home>${maven.home}</maven.home>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <profiles>
        <profile>
            <id>native</id>
            <activation>
                <property>
                    <name>native</name>
                </property>
            </activation>
            <build>
                <plugins>
                    <plugin>
                        <artifactId>maven-surefire-plugin</artifactId>
                        <configuration>
                            <skipTests>${native.surefire.skip}</skipTests>
                        </configuration>
                    </plugin>
                </plugins>
            </build>
            <properties>
                <skipITs>false</skipITs>
                <quarkus.native.enabled>true</quarkus.native.enabled>
            </properties>
        </profile>
    </profiles>
</project>
@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(AbstractProto.class)
public abstract class AbstractObject {
}
@@ -0,0 +1,10 @@
package com.usatiuk.autoprotomap.it;

import lombok.AllArgsConstructor;
import lombok.Getter;

@AllArgsConstructor
@Getter
public class CustomObject extends AbstractObject {
    public int testNum = 0;
}
@@ -0,0 +1,17 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Singleton;

@Singleton
public class CustomObjectSerializer implements ProtoSerializer<CustomObjectProto, CustomObject> {
    @Override
    public CustomObject deserialize(CustomObjectProto message) {
        return new CustomObject(2);
    }

    @Override
    public CustomObjectProto serialize(CustomObject object) {
        return CustomObjectProto.newBuilder().setTest(1).build();
    }
}
@@ -0,0 +1,8 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(InterfaceObjectProto.class)
public interface InterfaceObject {
    String key();
}
@@ -0,0 +1,15 @@
package com.usatiuk.autoprotomap.it;

import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;

@ProtoMirror(NestedObjectProto.class)
@AllArgsConstructor
@Getter
public class NestedObject extends AbstractObject {
    public SimpleObject object;
    public String _nestedName;
    public ByteString _nestedSomeBytes;
}
@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(RecordObjectProto.class)
public record RecordObject(String key) implements InterfaceObject {
}
@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(RecordObject2Proto.class)
public record RecordObject2(String key, int value) implements InterfaceObject {
}
@@ -0,0 +1,15 @@
package com.usatiuk.autoprotomap.it;

import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;

@ProtoMirror(SimpleObjectProto.class)
@AllArgsConstructor
@Getter
public class SimpleObject extends AbstractObject {
    public int numfield = 0;
    private String name;
    public ByteString someBytes;
}
@@ -0,0 +1,47 @@
syntax = "proto3";

option java_multiple_files = true;
option java_package = "com.usatiuk.autoprotomap.it";
option java_outer_classname = "TestProto";

package autoprotomap.test;

message SimpleObjectProto {
    int32 numfield = 1;
    string name = 2;
    bytes someBytes = 3;
}

message NestedObjectProto {
    SimpleObjectProto object = 1;
    string nestedName = 2;
    bytes nestedSomeBytes = 3;
}

message CustomObjectProto {
    int64 test = 1;
}

message AbstractProto {
    oneof obj {
        NestedObjectProto nestedObject = 1;
        SimpleObjectProto simpleObject = 2;
        CustomObjectProto customObject = 3;
    }
}

message RecordObjectProto {
    string key = 1;
}

message RecordObject2Proto {
    string key = 1;
    int32 value = 2;
}

message InterfaceObjectProto {
    oneof obj {
        RecordObjectProto recordObject = 1;
        RecordObject2Proto recordObject2 = 2;
    }
}
@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;

import io.quarkus.test.junit.QuarkusIntegrationTest;

@QuarkusIntegrationTest
public class AutoprotomapResourceIT extends AutoprotomapResourceTest {
}
@@ -0,0 +1,113 @@
package com.usatiuk.autoprotomap.it;

import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.test.junit.QuarkusTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

@QuarkusTest
public class AutoprotomapResourceTest {
    @Inject
    ProtoSerializer<SimpleObjectProto, SimpleObject> simpleProtoSerializer;
    @Inject
    ProtoSerializer<NestedObjectProto, NestedObject> nestedProtoSerializer;
    @Inject
    ProtoSerializer<AbstractProto, AbstractObject> abstractProtoSerializer;
    @Inject
    ProtoSerializer<InterfaceObjectProto, InterfaceObject> interfaceProtoSerializer;

    @Test
    public void testSimple() {
        var ret = simpleProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
        Assertions.assertEquals(1234, ret.getNumfield());
        Assertions.assertEquals("simple test", ret.getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSomeBytes());

        var des = simpleProtoSerializer.deserialize(ret);
        Assertions.assertEquals(1234, des.getNumfield());
        Assertions.assertEquals("simple test", des.getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
    }

    @Test
    public void testNested() {
        var ret = nestedProtoSerializer.serialize(
                new NestedObject(
                        new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
                        "nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
        Assertions.assertEquals(333, ret.getObject().getNumfield());
        Assertions.assertEquals("nested so", ret.getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", ret.getNestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedSomeBytes());

        var des = nestedProtoSerializer.deserialize(ret);
        Assertions.assertEquals(333, des.object.numfield);
        Assertions.assertEquals(333, des.getObject().getNumfield());
        Assertions.assertEquals("nested so", des.getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", des.get_nestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
    }

    @Test
    public void testAbstractSimple() {
        var ret = abstractProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
        Assertions.assertEquals(1234, ret.getSimpleObject().getNumfield());
        Assertions.assertEquals("simple test", ret.getSimpleObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSimpleObject().getSomeBytes());

        var des = (SimpleObject) abstractProtoSerializer.deserialize(ret);
        Assertions.assertEquals(1234, des.getNumfield());
        Assertions.assertEquals("simple test", des.getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
    }

    @Test
    public void testAbstractCustom() {
        var ret = abstractProtoSerializer.serialize(new CustomObject(1234));
        Assertions.assertEquals(1, ret.getCustomObject().getTest());

        var des = (CustomObject) abstractProtoSerializer.deserialize(ret);
        Assertions.assertEquals(2, des.getTestNum());
    }

    @Test
    public void testAbstractNested() {
        var ret = abstractProtoSerializer.serialize(
                new NestedObject(
                        new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
                        "nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
        Assertions.assertEquals(333, ret.getNestedObject().getObject().getNumfield());
        Assertions.assertEquals("nested so", ret.getNestedObject().getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getNestedObject().getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", ret.getNestedObject().getNestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedObject().getNestedSomeBytes());

        var des = (NestedObject) abstractProtoSerializer.deserialize(ret);
        Assertions.assertEquals(333, des.object.numfield);
        Assertions.assertEquals(333, des.getObject().getNumfield());
        Assertions.assertEquals("nested so", des.getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", des.get_nestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
    }

    @Test
    public void testInterface() {
        var ret = interfaceProtoSerializer.serialize(new RecordObject("record test"));
        Assertions.assertEquals("record test", ret.getRecordObject().getKey());
        var des = (RecordObject) interfaceProtoSerializer.deserialize(ret);
        Assertions.assertEquals("record test", des.key());

        var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234));
        Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey());
        Assertions.assertEquals(1234, ret2.getRecordObject2().getValue());
        var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2);
        Assertions.assertEquals("record test 2", des2.key());
        Assertions.assertEquals(1234, des2.value());
    }
}
dhfs-parent/autoprotomap/pom.xml (new file, 24 lines)
@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk.dhfs</groupId>
        <artifactId>parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <groupId>com.usatiuk</groupId>
    <artifactId>autoprotomap-parent</artifactId>
    <version>1.0-SNAPSHOT</version>
    <packaging>pom</packaging>
    <name>Autoprotomap - Parent</name>

    <modules>
        <module>deployment</module>
        <module>runtime</module>
        <module>integration-tests</module>
    </modules>

</project>
dhfs-parent/autoprotomap/runtime/pom.xml (new file, 63 lines)
@@ -0,0 +1,63 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk</groupId>
        <artifactId>autoprotomap-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <artifactId>autoprotomap</artifactId>
    <name>Autoprotomap - Runtime</name>

    <dependencies>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>io.quarkus</groupId>
                <artifactId>quarkus-extension-maven-plugin</artifactId>
                <version>${quarkus.platform.version}</version>
                <executions>
                    <execution>
                        <phase>compile</phase>
                        <goals>
                            <goal>extension-descriptor</goal>
                        </goals>
                        <configuration>
                            <deployment>${project.groupId}:${project.artifactId}-deployment:${project.version}</deployment>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <executions>
                    <execution>
                        <id>default-compile</id>
                        <configuration>
                            <annotationProcessorPaths>
                                <path>
                                    <groupId>io.quarkus</groupId>
                                    <artifactId>quarkus-extension-processor</artifactId>
                                    <version>${quarkus.platform.version}</version>
                                </path>
                            </annotationProcessorPaths>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
@@ -0,0 +1,12 @@
package com.usatiuk.autoprotomap.runtime;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.CLASS)
@Target(ElementType.TYPE)
public @interface ProtoMirror {
    Class<?> value() default Object.class;
}
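A minimal sketch of the intended usage, drawn from the integration tests in this commit (the wrapper class below is hypothetical; `SimpleObject` and `SimpleObjectProto` are the real test types):

```java
package com.usatiuk.autoprotomap.it;

// SimpleObject is annotated with @ProtoMirror(SimpleObjectProto.class), so the
// deployment processor generates a @Singleton ProtoSerializer bean for the pair,
// which can then be injected and used for round-tripping.
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Inject;

class SimpleObjectRoundTrip { // hypothetical consumer class
    @Inject
    ProtoSerializer<SimpleObjectProto, SimpleObject> serializer;

    void roundTrip() {
        // serialize() fills the proto builder from fields/getters;
        // deserialize() calls the all-args constructor matched by field names.
        SimpleObjectProto msg = serializer.serialize(
                new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
        SimpleObject back = serializer.deserialize(msg);
    }
}
```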
@@ -1,4 +1,4 @@
package com.usatiuk.dhfs;
package com.usatiuk.autoprotomap.runtime;

import com.google.protobuf.Message;
@@ -0,0 +1,9 @@
name: Autoprotomap
#description: Do something useful.
metadata:
#  keywords:
#    - autoprotomap
#  guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension
#  categories:
#    - "miscellaneous"
#  status: "preview"
@@ -1,164 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.usatiuk.dhfs</groupId>
    <artifactId>dhfs-fs</artifactId>
    <version>1.0-SNAPSHOT</version>

    <parent>
        <groupId>com.usatiuk.dhfs</groupId>
        <artifactId>parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <dependencies>
        <dependency>
            <groupId>org.testcontainers</groupId>
            <artifactId>testcontainers</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.awaitility</groupId>
            <artifactId>awaitility</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcprov-jdk18on</artifactId>
        </dependency>
        <dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcpkix-jdk18on</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-security</artifactId>
        </dependency>
        <dependency>
            <groupId>net.openhft</groupId>
            <artifactId>zero-allocation-hashing</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest-client</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest-client-jsonb</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest-jsonb</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-scheduler</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
        </dependency>
        <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
        </dependency>
        <dependency>
            <groupId>org.jboss.slf4j</groupId>
            <artifactId>slf4j-jboss-logmanager</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>commons-codec</groupId>
            <artifactId>commons-codec</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
        </dependency>
        <dependency>
            <groupId>org.pcollections</groupId>
            <artifactId>pcollections</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-math3</artifactId>
            <version>3.6.1</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>sync-base</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <configuration>
                    <forkCount>1C</forkCount>
                    <reuseForks>false</reuseForks>
                    <parallel>classes</parallel>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            false
                        </junit.jupiter.execution.parallel.enabled>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-failsafe-plugin</artifactId>
                <configuration>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            true
                        </junit.jupiter.execution.parallel.enabled>
                        <junit.jupiter.execution.parallel.mode.default>
                            concurrent
                        </junit.jupiter.execution.parallel.mode.default>
                        <junit.jupiter.execution.parallel.config.dynamic.factor>
                            0.5
                        </junit.jupiter.execution.parallel.config.dynamic.factor>
                        <junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
                        <junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
                <groupId>${quarkus.platform.group-id}</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
                <version>${quarkus.platform.version}</version>
                <extensions>true</extensions>
                <executions>
                    <execution>
                        <id>quarkus-plugin</id>
                        <goals>
                            <goal>generate-code</goal>
                            <goal>generate-code-tests</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
@@ -1,18 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;

import java.util.Collection;
import java.util.List;

public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
    public JKleppmannTreeNodeMeta withName(String name) {
        return new JKleppmannTreeNodeMetaDirectory(name);
    }

    @Override
    public Collection<JObjectKey> collectRefsTo() {
        return List.of();
    }
}
@@ -1,19 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;

import java.util.Collection;
import java.util.List;

public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
    @Override
    public JKleppmannTreeNodeMeta withName(String name) {
        return new JKleppmannTreeNodeMetaFile(name, fileIno);
    }

    @Override
    public Collection<JObjectKey> collectRefsTo() {
        return List.of(fileIno);
    }
}
@@ -1,29 +0,0 @@
package com.usatiuk.dhfsfs;

import io.quarkus.test.junit.QuarkusTestProfile;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

abstract public class TempDataProfile implements QuarkusTestProfile {
    protected void getConfigOverrides(Map<String, String> toPut) {
    }

    @Override
    final public Map<String, String> getConfigOverrides() {
        Path tempDirWithPrefix;
        try {
            tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        var ret = new HashMap<String, String>();
        ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
        ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
        getConfigOverrides(ret);
        return ret;
    }
}
@@ -1,40 +0,0 @@
package com.usatiuk.dhfsfs;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;

@ApplicationScoped
public class TestDataCleaner {
    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
    String tempDirectory;

    void init(@Observes @Priority(1) StartupEvent event) throws IOException {
        try {
            purgeDirectory(Path.of(tempDirectory).toFile());
        } catch (Exception ignored) {
            Log.warn("Couldn't cleanup test data on init");
        }
    }

    void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
        purgeDirectory(Path.of(tempDirectory).toFile());
    }

    public void purgeDirectory(File dir) {
        for (File file : Objects.requireNonNull(dir.listFiles())) {
            if (file.isDirectory())
                purgeDirectory(file);
            file.delete();
        }
    }
}
@@ -1,5 +0,0 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*
43
dhfs-parent/dhfs-fuse/.gitignore
vendored
@@ -1,43 +0,0 @@
#Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml

# Eclipse
.project
.classpath
.settings/
bin/

# IntelliJ
.idea
*.ipr
*.iml
*.iws

# NetBeans
nb-configuration.xml

# Visual Studio Code
.vscode
.factorypath

# OSX
.DS_Store

# Vim
*.swp
*.swo

# patch
*.orig
*.rej

# Local environment
.env

# Plugin directory
/.quarkus/cli/plugins/
@@ -1,2 +0,0 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl
@@ -1,43 +0,0 @@
version: "3.2"

services:
  dhfs1:
    build: .
    privileged: true
    devices:
      - /dev/fuse
    volumes:
      - $HOME/dhfs/dhfs1:/dhfs_root
      - $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
      - ./target/quarkus-app:/app
    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
      -Ddhfs.objects.root=/dhfs_root/d
      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
      -jar /app/quarkus-run.jar"
    ports:
      - 8080:8080
      - 8081:8443
      - 5005:5005
  dhfs2:
    build: .
    privileged: true
    devices:
      - /dev/fuse
    volumes:
      - $HOME/dhfs/dhfs2:/dhfs_root
      - $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
      - ./target/quarkus-app:/app
    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
      --add-exports java.base/jdk.internal.access=ALL-UNNAMED
      --add-opens=java.base/java.nio=ALL-UNNAMED
      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
      -Ddhfs.objects.root=/dhfs_root/d
      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
      -jar /app/quarkus-run.jar"
    ports:
      - 8090:8080
      - 8091:8443
      - 5010:5010
@@ -1,172 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.usatiuk.dhfs</groupId>
    <artifactId>dhfs-fuse</artifactId>
    <version>1.0-SNAPSHOT</version>

    <parent>
        <groupId>com.usatiuk.dhfs</groupId>
        <artifactId>parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <dependencies>
        <dependency>
            <groupId>org.testcontainers</groupId>
            <artifactId>testcontainers</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.awaitility</groupId>
            <artifactId>awaitility</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcprov-jdk18on</artifactId>
        </dependency>
        <dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcpkix-jdk18on</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-security</artifactId>
        </dependency>
        <dependency>
            <groupId>net.openhft</groupId>
            <artifactId>zero-allocation-hashing</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest-client</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest-client-jsonb</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest-jsonb</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-scheduler</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.github.serceman</groupId>
            <artifactId>jnr-fuse</artifactId>
            <version>0.5.8</version>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
        </dependency>
        <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
        </dependency>
        <dependency>
            <groupId>org.jboss.slf4j</groupId>
            <artifactId>slf4j-jboss-logmanager</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>commons-codec</groupId>
            <artifactId>commons-codec</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
        </dependency>
        <dependency>
            <groupId>org.pcollections</groupId>
            <artifactId>pcollections</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-math3</artifactId>
            <version>3.6.1</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>dhfs-fs</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>utils</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <configuration>
                    <forkCount>1C</forkCount>
                    <reuseForks>false</reuseForks>
                    <parallel>classes</parallel>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            false
                        </junit.jupiter.execution.parallel.enabled>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-failsafe-plugin</artifactId>
                <configuration>
                    <forkCount>0.5C</forkCount>
                    <reuseForks>false</reuseForks>
                    <parallel>classes</parallel>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            false
                        </junit.jupiter.execution.parallel.enabled>
                        <junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
                        <junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
                <groupId>${quarkus.platform.group-id}</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
                <version>${quarkus.platform.version}</version>
                <extensions>true</extensions>
                <executions>
                    <execution>
                        <id>quarkus-plugin</id>
                        <goals>
                            <goal>build</goal>
                            <goal>generate-code</goal>
                            <goal>generate-code-tests</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
@@ -1,97 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 being the default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
#   in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
#   used to calculate a default maximal heap memory based on a containers restriction.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
#   of the container available memory as set here. The default is `50` which means 50%
#   of the available memory is used as an upper boundary. You can skip this mechanism by
#   setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
#   is used to calculate a default initial heap memory based on the maximum heap memory.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
#   is used as the initial heap size. You can skip this mechanism by setting this value
#   to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
#   This is used to calculate the maximum value of the initial heap memory. If used in
#   a container without any memory constraints for the container then this option has
#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
#   here. The default is 4096MB which means the calculated value of `-Xms` never will
#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
#   when things are happening. This option, if set to true, will set
#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
#   "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
#   (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
#   (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
#   (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
#   previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
#   contain the necessary JRE command-line options to specify the required GC, which
#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
#   accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18

ENV LANGUAGE='en_US:en'


# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/

EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"

ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
@@ -1,93 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 being the default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
#   in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
#   used to calculate a default maximal heap memory based on a containers restriction.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
#   of the container available memory as set here. The default is `50` which means 50%
#   of the available memory is used as an upper boundary. You can skip this mechanism by
#   setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
#   is used to calculate a default initial heap memory based on the maximum heap memory.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
#   is used as the initial heap size. You can skip this mechanism by setting this value
#   to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
#   This is used to calculate the maximum value of the initial heap memory. If used in
#   a container without any memory constraints for the container then this option has
#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
#   here. The default is 4096MB which means the calculated value of `-Xms` never will
#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
#   when things are happening. This option, if set to true, will set
#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
#   "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
#   (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
#   (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
#   (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
#   previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
#   contain the necessary JRE command-line options to specify the required GC, which
#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
#   accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18

ENV LANGUAGE='en_US:en'


COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar

EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"

ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
@@ -1,27 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
WORKDIR /work/
RUN chown 1001 /work \
    && chmod "g+rwX" /work \
    && chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application

EXPOSE 8080
USER 1001

ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
@@ -1,30 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
RUN chown 1001 /work \
    && chmod "g+rwX" /work \
    && chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application

EXPOSE 8080
USER 1001

ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
@@ -1,35 +0,0 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.objects.periodic-push-op-interval=5m
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=524288
dhfs.files.max_chunk_size=524288
dhfs.files.target_chunk_alignment=17
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
@@ -1,29 +0,0 @@
package com.usatiuk.dhfsfuse;

import io.quarkus.test.junit.QuarkusTestProfile;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

abstract public class TempDataProfile implements QuarkusTestProfile {
    protected void getConfigOverrides(Map<String, String> toPut) {
    }

    @Override
    final public Map<String, String> getConfigOverrides() {
        Path tempDirWithPrefix;
        try {
            tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        var ret = new HashMap<String, String>();
        ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
        ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
        getConfigOverrides(ret);
        return ret;
    }
}
@@ -1,40 +0,0 @@
package com.usatiuk.dhfsfuse;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;

@ApplicationScoped
public class TestDataCleaner {
    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
    String tempDirectory;

    void init(@Observes @Priority(1) StartupEvent event) throws IOException {
        try {
            purgeDirectory(Path.of(tempDirectory).toFile());
        } catch (Exception ignored) {
            Log.warn("Couldn't cleanup test data on init");
        }
    }

    void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
        purgeDirectory(Path.of(tempDirectory).toFile());
    }

    public static void purgeDirectory(File dir) {
        for (File file : Objects.requireNonNull(dir.listFiles())) {
            if (file.isDirectory())
                purgeDirectory(file);
            file.delete();
        }
    }
}
@@ -1,235 +0,0 @@
package com.usatiuk.dhfsfuse.integration;

import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;

import static org.awaitility.Awaitility.await;

public class KillIT {
    GenericContainer<?> container1;
    GenericContainer<?> container2;

    WaitingConsumer waitingConsumer1;
    WaitingConsumer waitingConsumer2;

    String c1uuid;
    String c2uuid;

    File data1;
    File data2;

    Network network;

    ExecutorService executor;

    @BeforeEach
    void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
        executor = Executors.newCachedThreadPool();

        data1 = Files.createTempDirectory("").toFile();
        data2 = Files.createTempDirectory("").toFile();

        network = Network.newNetwork();

        container1 = new GenericContainer<>(DhfsImage.getInstance())
                .withPrivilegedMode(true)
                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
                .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
                .withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
        container2 = new GenericContainer<>(DhfsImage.getInstance())
                .withPrivilegedMode(true)
                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
                .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
                .withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");

        Stream.of(container1, container2).parallel().forEach(GenericContainer::start);

        waitingConsumer1 = new WaitingConsumer();
        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        waitingConsumer2 = new WaitingConsumer();
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));

        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();

        Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
        Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);

        var c1curl = container1.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{}' " +
                        " http://localhost:8080/peers-manage/known-peers/" + c2uuid);

        var c2curl = container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{}' " +
                        " http://localhost:8080/peers-manage/known-peers/" + c1uuid);

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
    }

    @AfterEach
    void stop() {
        Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
        TestDataCleaner.purgeDirectory(data1);
        TestDataCleaner.purgeDirectory(data2);
        executor.close();
        network.close();
    }

    private void checkConsistency() {
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            Log.info("Listing consistency");
            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            Log.info(ls1);
            Log.info(cat1);
            Log.info(ls2);
            Log.info(cat2);

            return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
        });
    }

    @Test
    void killTest(TestInfo testInfo) throws Exception {
        var barrier = new CyclicBarrier(2);
        var ret1 = executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.await();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(10000);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container1.getContainerId()).exec();
        container1.stop();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        container1.start();
        waitingConsumer1 = new WaitingConsumer();
        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency();
    }

    @Test
    void killTestDirs(TestInfo testInfo) throws Exception {
        var barrier = new CyclicBarrier(2);
        var ret1 = executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.await();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(10000);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container1.getContainerId()).exec();
        container1.stop();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        container1.start();
        waitingConsumer1 = new WaitingConsumer();
        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency();
    }

    @Test
    void killTest2(TestInfo testInfo) throws Exception {
        var barrier = new CyclicBarrier(2);
        var ret1 = executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.await();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(10000);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container2.getContainerId()).exec();
        container2.stop();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
        container2.start();
        waitingConsumer2 = new WaitingConsumer();
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency();
    }

    @Test
    void killTestDirs2(TestInfo testInfo) throws Exception {
        var barrier = new CyclicBarrier(2);
        var ret1 = executor.submit(() -> {
            try {
                Log.info("Writing to container 1");
                barrier.await();
                container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();
        Thread.sleep(10000);
        var client = DockerClientFactory.instance().client();
        client.killContainerCmd(container2.getContainerId()).exec();
        container2.stop();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
        container2.start();
        waitingConsumer2 = new WaitingConsumer();
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        checkConsistency();
    }
}
@@ -1,215 +0,0 @@
package com.usatiuk.dhfsfuse.integration;

import io.quarkus.logging.Log;

import java.io.*;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

public class LazyFs {
    private static final String lazyFsPath;

    static {
        lazyFsPath = System.getProperty("lazyFsPath");
        System.out.println("LazyFs Path: " + lazyFsPath);
    }

    private final String mountRoot;
    private final String dataRoot;
    private final String name;
    private final File configFile;
    private final File fifoFile;
    private Thread errPiper;
    private Thread outPiper;
    private CountDownLatch startLatch;
    private Process fs;
    public LazyFs(String name, String mountRoot, String dataRoot) {
        this.name = name;
        this.mountRoot = mountRoot;
        this.dataRoot = dataRoot;

        try {
            configFile = File.createTempFile("lazyfs", ".conf");
            configFile.deleteOnExit();

            fifoFile = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
            fifoFile.deleteOnExit();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
    }

    private String fifoPath() {
        return fifoFile.getAbsolutePath();
    }

    public void start(String extraOpts) {
        var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
        if (!lfsPath.toFile().isFile())
            throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
        if (!lfsPath.toFile().canExecute())
            throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());

        try (var rwFile = new RandomAccessFile(configFile, "rw");
             var channel = rwFile.getChannel()) {
            channel.truncate(0);
            var config = "[faults]\n" +
                    "fifo_path=\"" + fifoPath() + "\"\n" +
                    "[cache]\n" +
                    "apply_eviction=false\n" +
                    "[cache.simple]\n" +
                    "custom_size=\"1gb\"\n" +
                    "blocks_per_page=1\n" +
                    "[filesystem]\n" +
                    "log_all_operations=false\n" +
                    "logfile=\"\"\n" + extraOpts;
            rwFile.write(config.getBytes());
            Log.info("LazyFs config: \n" + config);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        var argList = new ArrayList<String>();

        argList.add(lfsPath.toString());
        argList.add(Path.of(mountRoot).toString());
        argList.add("--config-path");
        argList.add(configFile.getAbsolutePath());
        argList.add("-o");
        argList.add("allow_other");
        argList.add("-o");
        argList.add("modules=subdir");
        argList.add("-o");
        argList.add("subdir=" + Path.of(dataRoot).toAbsolutePath().toString());
        try {
            Log.info("Starting LazyFs " + argList);
            fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        startLatch = new CountDownLatch(1);

        outPiper = new Thread(() -> {
            try {
                try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
                    String line;

                    while ((line = input.readLine()) != null) {
                        if (line.contains("running LazyFS"))
                            startLatch.countDown();
                        System.out.println(line);
                    }
                }
            } catch (Exception e) {
                Log.info("Exception in LazyFs piper", e);
            }
            Log.info("LazyFs out piper finished");
        });
        outPiper.start();
        errPiper = new Thread(() -> {
            try {
                try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
                    String line;

                    while ((line = input.readLine()) != null) {
                        System.out.println(line);
                    }
                }
            } catch (Exception e) {
                Log.info("Exception in LazyFs piper", e);
            }
            Log.info("LazyFs err piper finished");
        });
        errPiper.start();

        try {
            if (!startLatch.await(30, TimeUnit.SECONDS))
                throw new RuntimeException("StartLatch timed out");
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        Log.info("LazyFs started");
    }

    public void start() {
        start("");
    }

    private String mdbPath() {
        return Path.of(dataRoot).resolve("objects").resolve("data.mdb").toAbsolutePath().toString();
    }

    public void startTornOp() {
        start("\n" +
                "[[injection]]\n" +
                "type=\"torn-seq\"\n" +
                "op=\"write\"\n" +
                "file=\"" + mdbPath() + "\"\n" +
                "persist=[1,4]\n" +
                "occurrence=3");
    }

    public void startTornSeq() {
        start("[[injection]]\n" +
                "type=\"torn-op\"\n" +
                "file=\"" + mdbPath() + "\"\n" +
                "occurrence=3\n" +
                "parts=3 #or parts_bytes=[4096,3600,1260]\n" +
                "persist=[1,3]");
    }

    public void crash() {
        try {
            var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
            Log.info("Running command: " + cmd);
            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd}).waitFor();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public void stop() {
        try {
            synchronized (this) {
                Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + mountRoot}).waitFor();
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    // Doesn't actually work?
    //
    //    public void crashop() {
    //        try {
    //            var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
    //            System.out.println("Running command: " + cmd);
    //            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
    //            Thread.sleep(1000);
    //            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
    //            Thread.sleep(1000);
    //        } catch (Exception e) {
    //            throw new RuntimeException(e);
    //        }
    //    }
    //
    //    public void crashseq() {
    //        try {
    //            var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
    //            System.out.println("Running command: " + cmd);
    //            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
    //            Thread.sleep(1000);
    //            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
    //            Thread.sleep(1000);
    //        } catch (Exception e) {
    //            throw new RuntimeException(e);
    //        }
    //    }
}
@@ -1,489 +0,0 @@
|
||||
package com.usatiuk.dhfsfuse.integration;
|
||||
|
||||
import com.github.dockerjava.api.model.Device;
|
||||
import com.usatiuk.dhfsfuse.TestDataCleaner;
|
||||
import io.quarkus.logging.Log;
|
||||
import org.junit.jupiter.api.*;
|
||||
import org.junit.jupiter.params.ParameterizedTest;
|
||||
import org.junit.jupiter.params.provider.EnumSource;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.testcontainers.DockerClientFactory;
|
||||
import org.testcontainers.containers.GenericContainer;
|
||||
import org.testcontainers.containers.Network;
|
||||
import org.testcontainers.containers.output.Slf4jLogConsumer;
|
||||
import org.testcontainers.containers.output.WaitingConsumer;
|
||||
import org.testcontainers.containers.wait.strategy.Wait;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.time.Duration;
|
||||
import java.util.Objects;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.*;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static org.awaitility.Awaitility.await;
|
||||
|
||||
public class LazyFsIT {
|
||||
GenericContainer<?> container1;
|
||||
GenericContainer<?> container2;
|
||||
|
||||
WaitingConsumer waitingConsumer1;
|
||||
WaitingConsumer waitingConsumer2;
|
||||
|
||||
String c1uuid;
|
||||
String c2uuid;
|
||||
|
||||
File data1;
|
||||
File data2;
|
||||
File data1Lazy;
|
||||
File data2Lazy;
|
||||
|
||||
LazyFs lazyFs1;
|
||||
LazyFs lazyFs2;
|
||||
|
||||
ExecutorService executor;
|
||||
Network network;
|
||||
|
||||
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("dhfsdata").toFile();
data2 = Files.createTempDirectory("dhfsdata").toFile();
data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
data2Lazy = Files.createTempDirectory("lazyfsroot").toFile();

network = Network.newNetwork();

lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
lazyFs1.start();
lazyFs2 = new LazyFs(testInfo.getDisplayName(), data2.toString(), data2Lazy.toString());
lazyFs2.start();

container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");

Stream.of(container1, container2).parallel().forEach(GenericContainer::start);

waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));

c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();

Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);

var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);

var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}

@AfterEach
void stop() {
lazyFs1.stop();
lazyFs2.stop();

Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data1Lazy);
TestDataCleaner.purgeDirectory(data2);
TestDataCleaner.purgeDirectory(data2Lazy);

executor.close();
network.close();
}

private void checkConsistency(String testName) {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info("Listing consistency " + testName + "\n"
+ ls1 + "\n"
+ cat1 + "\n"
+ ls2 + "\n"
+ cat2 + "\n");

return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}

@ParameterizedTest
@EnumSource(CrashType.class)
void killTest(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();

waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}

executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();

waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency(testInfo.getDisplayName());
}

@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();

waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason

Assumptions.assumeTrue(false);
}

executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();

waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency(testInfo.getDisplayName());
}

@ParameterizedTest
@EnumSource(CrashType.class)
void killTest2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();

waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason

Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Log.info("Killing");
Thread.sleep(3000);
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();

waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency(testInfo.getDisplayName());
}

@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();

waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}

var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Thread.sleep(3000);
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();

waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency(testInfo.getDisplayName());
}


private static enum CrashType {
CRASH,
TORN_OP,
TORN_SEQ
}


}
@@ -1,11 +0,0 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true
@@ -1,5 +1,7 @@
package com.usatiuk.kleppmanntree;

import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;
@@ -15,6 +17,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
private final PeerInterface<PeerIdT> _peers;
private final Clock<TimestampT> _clock;
private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;

public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
PeerInterface<PeerIdT> peers,
@@ -86,6 +89,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
node.withParent(null)
.withLastEffectiveOp(null)
);
_undoCtx.put(node.key(), node);
}
}

@@ -213,16 +217,31 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
assert cmp != 0;
if (cmp < 0) {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
try {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
_undoCtx = new HashMap<>();
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
}
try {
doAndPut(op, failCreatingIfExists);
} finally {
for (var entry : toUndo) {
redoOp(entry);
}

if (!_undoCtx.isEmpty()) {
for (var e : _undoCtx.entrySet()) {
LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
_storage.removeNode(e.getKey());
}
}
_undoCtx = null;
}
} finally {
tryTrimLog();
}
doAndPut(op, failCreatingIfExists);
for (var entry : toUndo) {
redoOp(entry);
}
tryTrimLog();
} else {
doAndPut(op, failCreatingIfExists);
tryTrimLog();
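The hunk above replaces the plain undo/apply/redo sequence with a guarded version: ops newer than the incoming one are undone in reverse log order, the op is applied, the undone ops are replayed, and any nodes left orphaned in the undo context are dropped, with tryTrimLog() always running in a finally. A minimal sketch of that window, using a hypothetical Op record and print stubs in place of the project's undoOp/doAndPut/redoOp (not its real API):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.NavigableMap;
import java.util.TreeMap;

class UndoRedoWindowSketch {
    record Op(long timestamp, String payload) {}

    private final NavigableMap<Long, Op> log = new TreeMap<>();

    void undoOp(Op op) { System.out.println("undo " + op); }
    void applyOp(Op op) { log.put(op.timestamp(), op); }
    void redoOp(Op op) { System.out.println("redo " + op); }
    void tryTrimLog() { /* trim entries all peers have acknowledged */ }

    // An op older than the newest log entry: unwind everything newer,
    // apply it, then replay the unwound ops, so every replica converges
    // on the same timestamp order regardless of delivery order.
    void applyOutOfOrder(Op op) {
        if (log.containsKey(op.timestamp())) return;
        Deque<Op> undone = new ArrayDeque<>();
        try {
            for (Op newer : log.tailMap(op.timestamp(), false).descendingMap().values()) {
                undoOp(newer);       // undo newest first
                undone.push(newer);
            }
            try {
                applyOp(op);
            } finally {
                while (!undone.isEmpty()) redoOp(undone.pop()); // redo oldest first
            }
        } finally {
            tryTrimLog();
        }
    }
}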
@@ -245,7 +264,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
} catch (AlreadyExistsException aex) {
throw aex;
} catch (Exception e) {
throw new RuntimeException("Error computing effects for op " + op.toString(), e);
LOGGER.log(Level.SEVERE, "Error computing effects for op" + op.toString(), e);
computed = new LogRecord<>(op, null);
}

if (computed.effects() != null)
@@ -254,6 +274,24 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}

private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
if (_undoCtx != null) {
var node = _undoCtx.get(key);
if (node != null) {
try {
if (!node.children().isEmpty()) {
LOGGER.log(Level.WARNING, "Not empty children for undone node " + key);
}
node = node.withParent(parent).withMeta(meta);
} catch (Exception e) {
LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
node = null;
}
}
if (node != null) {
_undoCtx.remove(key);
return node;
}
}
return _storage.createNewNode(key, parent, meta);
}

@@ -334,6 +372,10 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
var conflictNode = _storage.getById(conflictNodeId);
MetaT conflictNodeMeta = conflictNode.meta();

if (Objects.equals(conflictNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}

LOGGER.finer(() -> "Node creation conflict: " + conflictNode);

String newConflictNodeName = op.newName() + ".conflict." + conflictNode.key();
@@ -358,14 +400,18 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
if (oldMeta != null
&& op.newMeta() != null
&& !oldMeta.getClass().equals(op.newMeta().getClass())) {
throw new RuntimeException("Class mismatch for meta for node " + node.key());
LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
return new LogRecord<>(op, null);
}

var replaceNodeId = newParent.children().get(op.newName());
if (replaceNodeId != null) {
var replaceNode = _storage.getById(replaceNodeId);
var replaceNodeMeta = replaceNode.meta();

if (Objects.equals(replaceNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}

LOGGER.finer(() -> "Node replacement: " + replaceNode);

return new LogRecord<>(op, List.of(

@@ -10,14 +10,14 @@ public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT exten
NodeIdT childId) implements Serializable {
public String oldName() {
if (oldInfo.oldMeta() != null) {
return oldInfo.oldMeta().name();
return oldInfo.oldMeta().getName();
}
return childId.toString();
}

public String newName() {
if (newMeta != null) {
return newMeta.name();
return newMeta.getName();
}
return childId.toString();
}

@@ -3,7 +3,7 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;

public interface NodeMeta extends Serializable {
String name();
String getName();

NodeMeta withName(String name);
}

@@ -7,7 +7,7 @@ public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends
NodeIdT childId) implements Serializable {
public String newName() {
if (newMeta != null)
return newMeta.name();
return newMeta.getName();
return childId.toString();
}
}

@@ -17,7 +17,7 @@ public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT ext

default String name() {
var meta = meta();
if (meta != null) return meta.name();
if (meta != null) return meta.getName();
return key().toString();
}


@@ -8,7 +8,7 @@ public abstract class TestNodeMeta implements NodeMeta {
}

@Override
public String name() {
public String getName() {
return _name;
}


@@ -18,11 +18,6 @@
</properties>

<dependencies>
<dependency>
<groupId>net.jqwik</groupId>
<artifactId>jqwik</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
@@ -59,6 +54,11 @@
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>supportlib</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5-mockito</artifactId>
@@ -69,6 +69,11 @@
<artifactId>lmdbjava</artifactId>
<version>0.9.1</version>
</dependency>
<dependency>
<groupId>org.rocksdb</groupId>
<artifactId>rocksdbjni</artifactId>
<version>9.10.0</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
@@ -88,11 +93,6 @@
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
@@ -104,6 +104,7 @@
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>

@@ -2,6 +2,11 @@ package com.usatiuk.objects;

import java.io.Serializable;

// TODO: This could be maybe moved to a separate module?
// The base class for JObject data
// Only one instance of this "exists" per key, the instance in the manager is canonical
// When committing a transaction, the instance is checked against it, if it isn't the same, a race occurred.
// It is immutable, its version is filled in by the allocator from the AllocVersionProvider
public interface JData extends Serializable {
JObjectKey key();


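The comment block above pins down the concurrency contract: one canonical instance per key, compared at commit time to detect races. A hedged sketch of such an identity-based check (the map and method names here are illustrative, not the manager's real API):

import java.util.concurrent.ConcurrentHashMap;

class CanonicalInstanceSketch {
    private final ConcurrentHashMap<String, Object> canonical = new ConcurrentHashMap<>();

    // Succeeds only if readInstance is still the canonical instance for key.
    // Objects that do not override equals compare by reference here, so
    // replace() effectively asks: "did anyone commit under me?"
    boolean commit(String key, Object readInstance, Object newInstance) {
        return canonical.replace(key, readInstance, newInstance);
    }
}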
@@ -1,13 +1,6 @@
package com.usatiuk.objects;

import com.usatiuk.objects.iterators.Data;

public sealed interface JDataVersionedWrapper extends Data<JDataVersionedWrapper> permits JDataVersionedWrapperLazy, JDataVersionedWrapperImpl {
@Override
default JDataVersionedWrapper value() {
return this;
}

public interface JDataVersionedWrapper {
JData data();

long version();

@@ -2,11 +2,11 @@ package com.usatiuk.objects;

import java.util.function.Supplier;

public final class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
public class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
private final long _version;
private final int _estimatedSize;
private JData _data;
private Supplier<JData> _producer;
private JData _data;

public JDataVersionedWrapperLazy(long version, int estimatedSize, Supplier<JData> producer) {
_version = version;
@@ -14,18 +14,6 @@ public final class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
_producer = producer;
}

public void setCacheCallback(Runnable cacheCallback) {
if (_data != null) {
throw new IllegalStateException("Cache callback can be set only before data is loaded");
}
var oldProducer = _producer;
_producer = () -> {
var ret = oldProducer.get();
cacheCallback.run();
return ret;
};
}

public JData data() {
if (_data != null)
return _data;

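The wrapper above defers deserialization to a Supplier and memoizes the result, and setCacheCallback() chains a one-shot hook onto the producer before first load. A self-contained sketch of that pattern (names are illustrative, not the project's class):

import java.util.function.Supplier;

final class LazySketch<T> {
    private Supplier<T> producer;
    private T value;

    LazySketch(Supplier<T> producer) {
        this.producer = producer;
    }

    synchronized T get() {
        if (value == null) {
            value = producer.get();
            producer = null; // let the producer (and any captured buffers) be collected
        }
        return value;
    }

    // Must be registered before the value is materialized; fires exactly once,
    // on the first successful get().
    synchronized void onFirstLoad(Runnable callback) {
        if (value != null) throw new IllegalStateException("already loaded");
        var old = producer;
        producer = () -> {
            var ret = old.get();
            callback.run();
            return ret;
        };
    }
}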
@@ -2,16 +2,17 @@ package com.usatiuk.objects;


import com.google.protobuf.ByteString;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;

import java.nio.ByteBuffer;

@Singleton
public class JDataVersionedWrapperSerializer {
@ApplicationScoped
public class JDataVersionedWrapperSerializer implements ObjectSerializer<JDataVersionedWrapper> {
@Inject
ObjectSerializer<JData> dataSerializer;

@Override
public ByteString serialize(JDataVersionedWrapper obj) {
ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
buffer.putLong(obj.version());
@@ -19,10 +20,10 @@ public class JDataVersionedWrapperSerializer {
return ByteString.copyFrom(buffer).concat(dataSerializer.serialize(obj.data()));
}

public JDataVersionedWrapper deserialize(ByteBuffer data) {
var version = data.getLong();
return new JDataVersionedWrapperLazy(version, data.remaining(),
() -> dataSerializer.deserialize(data)
);
@Override
public JDataVersionedWrapper deserialize(ByteString data) {
var version = data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
var rawData = data.substring(Long.BYTES);
return new JDataVersionedWrapperLazy(version, rawData.size(), () -> dataSerializer.deserialize(rawData));
}
}

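The format above is an 8-byte version prefix followed by the serialized object, with the tail decoded lazily on first access. A hedged round-trip sketch of that layout with a plain byte payload (illustrative, not the project's serializer):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

class VersionPrefixSketch {
    static byte[] serialize(long version, byte[] payload) {
        return ByteBuffer.allocate(Long.BYTES + payload.length)
                .putLong(version)   // big-endian version prefix, as in serialize() above
                .put(payload)
                .array();
    }

    public static void main(String[] args) {
        var bytes = serialize(42L, "hello".getBytes(StandardCharsets.UTF_8));
        var buf = ByteBuffer.wrap(bytes);
        long version = buf.getLong();            // read the prefix eagerly
        byte[] rest = new byte[buf.remaining()]; // the tail that would be decoded lazily
        buf.get(rest);
        assert version == 42L;
        assert new String(rest, StandardCharsets.UTF_8).equals("hello");
    }
}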
@@ -9,8 +9,8 @@ public sealed interface JObjectKey extends Serializable, Comparable<JObjectKey>
JObjectKeyMin MIN = new JObjectKeyMin();
JObjectKeyMax MAX = new JObjectKeyMax();

static JObjectKey of(String value) {
return new JObjectKeyImpl(value);
static JObjectKey of(String name) {
return new JObjectKeyImpl(name);
}

static JObjectKey random() {
@@ -26,13 +26,11 @@ public sealed interface JObjectKey extends Serializable, Comparable<JObjectKey>
}

static JObjectKey fromBytes(byte[] bytes) {
return new JObjectKeyImpl(new String(bytes, StandardCharsets.ISO_8859_1));
return new JObjectKeyImpl(new String(bytes, StandardCharsets.UTF_8));
}

static JObjectKey fromByteBuffer(ByteBuffer buff) {
byte[] bytes = new byte[buff.remaining()];
buff.get(bytes);
return new JObjectKeyImpl(bytes);
return new JObjectKeyImpl(StandardCharsets.UTF_8.decode(buff).toString());
}

@Override
@@ -41,7 +39,9 @@ public sealed interface JObjectKey extends Serializable, Comparable<JObjectKey>
@Override
String toString();

byte[] bytes();

ByteBuffer toByteBuffer();

String value();
String name();
}

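These hunks move key encoding from ISO_8859_1, which was being used as a raw byte round-trip, to UTF_8, a proper text encoding. A quick illustration of why that matters for key names outside Latin-1 (an illustrative check, not project code):

import java.nio.charset.StandardCharsets;

class KeyCharsetSketch {
    public static void main(String[] args) {
        String key = "каталог"; // characters outside Latin-1
        byte[] utf8 = key.getBytes(StandardCharsets.UTF_8);
        assert new String(utf8, StandardCharsets.UTF_8).equals(key); // round-trips cleanly
        // Encoding a String with ISO_8859_1 replaces unmappable characters
        // with '?', so the text is lost on the way out:
        byte[] latin1 = key.getBytes(StandardCharsets.ISO_8859_1);
        assert !new String(latin1, StandardCharsets.ISO_8859_1).equals(key);
    }
}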
@@ -1,31 +1,16 @@
package com.usatiuk.objects;

import com.usatiuk.utils.UninitializedByteBuffer;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;

import java.io.Serial;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Objects;

public final class JObjectKeyImpl implements JObjectKey {
@Serial
private static final long serialVersionUID = 0L;
private final String value;
private transient ByteBuffer _bb = null;

public JObjectKeyImpl(String value) {
this.value = value;
}

public JObjectKeyImpl(byte[] bytes) {
this.value = new String(bytes, StandardCharsets.ISO_8859_1);
}

public record JObjectKeyImpl(String name) implements JObjectKey {
@Override
public int compareTo(JObjectKey o) {
switch (o) {
case JObjectKeyImpl jObjectKeyImpl -> {
return value.compareTo(jObjectKeyImpl.value());
return name.compareTo(jObjectKeyImpl.name());
}
case JObjectKeyMax jObjectKeyMax -> {
return -1;
@@ -38,40 +23,21 @@ public final class JObjectKeyImpl implements JObjectKey {

@Override
public String toString() {
return value;
return name;
}

@Override
public byte[] bytes() {
return name.getBytes(StandardCharsets.UTF_8);
}

@Override
public ByteBuffer toByteBuffer() {
if (_bb != null) return _bb;

synchronized (this) {
if (_bb != null) return _bb;
var bytes = value.getBytes(StandardCharsets.ISO_8859_1);
var directBb = UninitializedByteBuffer.allocate(bytes.length);
directBb.put(bytes);
directBb.flip();
_bb = directBb;
return directBb;
}
var heapBb = StandardCharsets.UTF_8.encode(name);
if (heapBb.isDirect()) return heapBb;
var directBb = UninitializedByteBuffer.allocateUninitialized(heapBb.remaining());
directBb.put(heapBb);
directBb.flip();
return directBb;
}

@Override
public String value() {
return value;
}

@Override
public boolean equals(Object obj) {
if (obj == this) return true;
if (obj == null || obj.getClass() != this.getClass()) return false;
var that = (JObjectKeyImpl) obj;
return Objects.equals(this.value, that.value);
}

@Override
public int hashCode() {
return value.hashCode();
}

}

@@ -18,13 +18,18 @@ public record JObjectKeyMax() implements JObjectKey {
}
}

@Override
public byte[] bytes() {
throw new UnsupportedOperationException();
}

@Override
public ByteBuffer toByteBuffer() {
throw new UnsupportedOperationException();
}

@Override
public String value() {
public String name() {
throw new UnsupportedOperationException();
}
}

@@ -18,13 +18,18 @@ public record JObjectKeyMin() implements JObjectKey {
}
}

@Override
public byte[] bytes() {
throw new UnsupportedOperationException();
}

@Override
public ByteBuffer toByteBuffer() {
throw new UnsupportedOperationException();
}

@Override
public String value() {
public String name() {
throw new UnsupportedOperationException();
}
}

@@ -2,13 +2,11 @@ package com.usatiuk.objects;


import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.utils.SerializationHelper;
import com.usatiuk.dhfs.utils.SerializationHelper;
import io.quarkus.arc.DefaultBean;
import jakarta.enterprise.context.ApplicationScoped;

import java.io.IOException;
import java.nio.ByteBuffer;

@ApplicationScoped
@DefaultBean
@@ -18,8 +16,9 @@ public class JavaDataSerializer implements ObjectSerializer<JData> {
return SerializationHelper.serialize(obj);
}

public JData deserialize(ByteBuffer data) {
try (var is = UnsafeByteOperations.unsafeWrap(data).newInput()) {
@Override
public JData deserialize(ByteString data) {
try (var is = data.newInput()) {
return SerializationHelper.deserialize(is);
} catch (IOException e) {
throw new RuntimeException(e);

@@ -2,10 +2,8 @@ package com.usatiuk.objects;

import com.google.protobuf.ByteString;

import java.nio.ByteBuffer;

public interface ObjectSerializer<T> {
ByteString serialize(T obj);

T deserialize(ByteBuffer data);
T deserialize(ByteString data);
}

@@ -1,6 +1,6 @@
package com.usatiuk.objects.iterators;

import com.usatiuk.utils.AutoCloseableNoThrow;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Iterator;

@@ -1,5 +1,10 @@
package com.usatiuk.objects.iterators;

public interface Data<V> extends MaybeTombstone<V> {
V value();
import java.util.Optional;

public record Data<V>(V value) implements MaybeTombstone<V> {
@Override
public Optional<V> opt() {
return Optional.of(value);
}
}

@@ -1,4 +0,0 @@
package com.usatiuk.objects.iterators;

public record DataWrapper<V>(V value) implements Data<V> {
}
@@ -0,0 +1,6 @@
package com.usatiuk.objects.iterators;

@FunctionalInterface
public interface IterProdFn<K extends Comparable<K>, V> {
CloseableKvIterator<K, V> get(IteratorStart start, K key);
}
@@ -39,20 +39,20 @@ public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends Reversib
}


// switch (start) {
// case LT -> {
//// assert _next == null || _next.getKey().compareTo(startKey) < 0;
// }
// case LE -> {
//// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
// }
// case GT -> {
// assert _next == null || _next.compareTo(startKey) > 0;
// }
// case GE -> {
// assert _next == null || _next.compareTo(startKey) >= 0;
// }
// }
switch (start) {
case LT -> {
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
}
case LE -> {
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
}
case GT -> {
assert _next == null || _next.compareTo(startKey) > 0;
}
case GE -> {
assert _next == null || _next.compareTo(startKey) >= 0;
}
}
}

private void fillNext() {

@@ -1,4 +1,7 @@
package com.usatiuk.objects.iterators;

import java.util.Optional;

public interface MaybeTombstone<T> {
Optional<T> opt();
}

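With opt() on MaybeTombstone, both Data and Tombstone (reworked into records in the surrounding hunks) can be consumed uniformly, without instanceof checks. A self-contained sketch (the local record copies are assumed to mirror the hunks above; the stream itself is illustrative):

import java.util.List;
import java.util.Optional;

class TombstoneFilterSketch {
    interface MaybeTombstone<T> { Optional<T> opt(); }
    record Data<V>(V value) implements MaybeTombstone<V> {
        public Optional<V> opt() { return Optional.of(value); }
    }
    record Tombstone<V>() implements MaybeTombstone<V> {
        public Optional<V> opt() { return Optional.empty(); }
    }

    public static void main(String[] args) {
        List<MaybeTombstone<String>> merged =
                List.of(new Data<>("a"), new Tombstone<>(), new Data<>("b"));
        // opt().stream() yields zero elements for tombstones, one for data
        var live = merged.stream().flatMap(m -> m.opt().stream()).toList();
        assert live.equals(List.of("a", "b"));
    }
}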
@@ -1,28 +1,25 @@
package com.usatiuk.objects.iterators;

import io.quarkus.logging.Log;
import org.apache.commons.lang3.mutable.MutableObject;
import org.apache.commons.lang3.tuple.Pair;

import java.util.List;
import java.util.NavigableMap;
import java.util.NoSuchElementException;
import java.util.TreeMap;
import java.util.*;
import java.util.stream.IntStream;

public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final NavigableMap<K, IteratorEntry<K, V>> _sortedIterators = new TreeMap<>();
private final List<IteratorEntry<K, V>> _iterators;
private final NavigableMap<K, CloseableKvIterator<K, V>> _sortedIterators = new TreeMap<>();
private final String _name;
private final Map<CloseableKvIterator<K, V>, Integer> _iterators;

public MergingKvIterator(IteratorStart startType, K startKey, List<CloseableKvIterator<K, V>> iterators) {
public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
_goingForward = true;
_name = name;

{
IteratorEntry<K, V>[] iteratorEntries = new IteratorEntry[iterators.size()];
for (int i = 0; i < iterators.size(); i++) {
iteratorEntries[i] = new IteratorEntry<>(i, iterators.get(i));
}
_iterators = List.of(iteratorEntries);
}
_iterators = Map.ofEntries(
IntStream.range(0, iterators.size())
.mapToObj(i -> Pair.of(iterators.get(i).get(startType, startKey), i))
.toArray(Pair[]::new)
);

if (startType == IteratorStart.LT || startType == IteratorStart.LE) {
// Starting at a greatest key less than/less or equal than:
@@ -33,8 +30,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
K greatestLess = null;
K smallestMore = null;

for (var ite : _iterators) {
var it = ite.iterator();
for (var it : _iterators.keySet()) {
if (it.hasNext()) {
var peeked = it.peekNextKey();
if (startType == IteratorStart.LE ? peeked.compareTo(startKey) <= 0 : peeked.compareTo(startKey) < 0) {
@@ -59,83 +55,72 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
// Empty iterators
}

for (var ite : _iterators) {
var iterator = ite.iterator();
for (var iterator : _iterators.keySet()) {
while (iterator.hasNext() && iterator.peekNextKey().compareTo(initialMaxValue) < 0) {
iterator.skip();
}
}
}

for (IteratorEntry<K, V> iterator : _iterators) {
for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
advanceIterator(iterator);
}

// Log.tracev("{0} Initialized: {1}", _name, _sortedIterators);
// switch (startType) {
//// case LT -> {
//// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
//// }
//// case LE -> {
//// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
//// }
// case GT -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) > 0;
Log.tracev("{0} Initialized: {1}", _name, _sortedIterators);
switch (startType) {
// case LT -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
// }
// case GE -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) >= 0;
// case LE -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
// }
// }
case GT -> {
assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) > 0;
}
case GE -> {
assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) >= 0;
}
}
}

@SafeVarargs
public MergingKvIterator(IteratorStart startType, K startKey, CloseableKvIterator<K, V>... iterators) {
this(startType, startKey, List.of(iterators));
public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
this(name, startType, startKey, List.of(iterators));
}

private void advanceIterator(IteratorEntry<K, V> iteratorEntry) {
while (iteratorEntry.iterator().hasNext()) {
K key = iteratorEntry.iterator().peekNextKey();
// Log.tracev("{0} Advance peeked: {1}-{2}", _name, iteratorEntry, key);

MutableObject<IteratorEntry<K, V>> mutableBoolean = new MutableObject<>(null);

var newVal = _sortedIterators.merge(key, iteratorEntry, (theirsEntry, oldValOurs) -> {
var oursPrio = oldValOurs.priority();
var theirsPrio = theirsEntry.priority();

if (oursPrio < theirsPrio) {
mutableBoolean.setValue(theirsEntry);
return oldValOurs;
// advance them
// return
} else {
return theirsEntry;
// skip, continue
}
});

if (newVal != iteratorEntry) {
// Log.tracev("{0} Skipped: {1}", _name, iteratorEntry.iterator().peekNextKey());
iteratorEntry.iterator().skip();
continue;
}

if (mutableBoolean.getValue() != null) {
advanceIterator(mutableBoolean.getValue());
return;
}
private void advanceIterator(CloseableKvIterator<K, V> iterator) {
if (!iterator.hasNext()) {
return;
}

K key = iterator.peekNextKey();
Log.tracev("{0} Advance peeked: {1}-{2}", _name, iterator, key);
if (!_sortedIterators.containsKey(key)) {
_sortedIterators.put(key, iterator);
return;
}

// Expects that reversed iterator returns itself when reversed again
var oursPrio = _iterators.get(_goingForward ? iterator : iterator.reversed());
var them = _sortedIterators.get(key);
var theirsPrio = _iterators.get(_goingForward ? them : them.reversed());
if (oursPrio < theirsPrio) {
_sortedIterators.put(key, iterator);
advanceIterator(them);
} else {
Log.tracev("{0} Skipped: {1}", _name, iterator.peekNextKey());
iterator.skip();
advanceIterator(iterator);
}
}

@Override
protected void reverse() {
var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
// Log.tracev("{0} Reversing from {1}", _name, cur);
Log.tracev("{0} Reversing from {1}", _name, cur);
_goingForward = !_goingForward;
_sortedIterators.clear();
for (IteratorEntry<K, V> iterator : _iterators) {
for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
// _goingForward inverted already
advanceIterator(!_goingForward ? iterator.reversed() : iterator);
}
@@ -149,6 +134,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|| (!_goingForward && peekImpl().compareTo(cur.getKey()) >= 0))) {
skipImpl();
}
Log.tracev("{0} Reversed to {1}", _name, _sortedIterators);
}

@Override
@@ -164,9 +150,9 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
if (cur == null) {
throw new NoSuchElementException();
}
cur.getValue().iterator().skip();
cur.getValue().skip();
advanceIterator(cur.getValue());
// Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators);
Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators);
}

@Override
@@ -180,7 +166,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
if (cur == null) {
throw new NoSuchElementException();
}
var curVal = cur.getValue().iterator().next();
var curVal = cur.getValue().next();
advanceIterator(cur.getValue());
// Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, cur.getValue(), curVal, _sortedIterators.keySet());
return curVal;
@@ -188,22 +174,30 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI

@Override
public void close() {
for (IteratorEntry<K, V> iterator : _iterators) {
iterator.iterator().close();
for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
iterator.close();
}
}

@Override
public String toString() {
return "MergingKvIterator{" +
"_name='" + _name + '\'' +
", _sortedIterators=" + _sortedIterators.keySet() +
", _iterators=" + _iterators +
'}';
}

private record IteratorEntry<K extends Comparable<K>, V>(int priority, CloseableKvIterator<K, V> iterator) {
public IteratorEntry<K, V> reversed() {
return new IteratorEntry<>(priority, iterator.reversed());
}
private interface FirstMatchState<K extends Comparable<K>, V> {
}

private record FirstMatchNone<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
}

private record FirstMatchFound<K extends Comparable<K>, V>(
CloseableKvIterator<K, V> iterator) implements FirstMatchState<K, V> {
}

private record FirstMatchConsumed<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
}
}

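The priority rule in advanceIterator() above: when several sources are positioned on the same key, the source with the lower index in _iterators wins, and the losers skip past that key. A standalone sketch of the same rule over materialized sorted maps (illustrative names; the real class works over streaming iterators):

import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

class MergePrioritySketch {
    // sources.get(0) has the highest priority, matching the iterator indexing above.
    static <K extends Comparable<K>, V> NavigableMap<K, V> merge(List<NavigableMap<K, V>> sources) {
        NavigableMap<K, V> out = new TreeMap<>();
        // Insert lowest priority first so higher-priority sources overwrite duplicates.
        for (int i = sources.size() - 1; i >= 0; i--) {
            out.putAll(sources.get(i));
        }
        return out;
    }

    public static void main(String[] args) {
        var writes = new TreeMap<String, String>(); // e.g. pending writes, priority 0
        writes.put("k1", "new");
        var store = new TreeMap<String, String>();  // e.g. persistent store, priority 1
        store.put("k1", "old");
        store.put("k2", "kept");
        var merged = merge(List.of(writes, store));
        assert merged.get("k1").equals("new") && merged.get("k2").equals("kept");
    }
}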
@@ -9,22 +9,22 @@ public class NavigableMapKvIterator<K extends Comparable<K>, V> extends Reversib
private Iterator<Map.Entry<K, V>> _iterator;
private Map.Entry<K, V> _next;

public NavigableMapKvIterator(NavigableMap<K, ? extends V> map, IteratorStart start, K key) {
_map = (NavigableMap<K, V>) map;
public NavigableMapKvIterator(NavigableMap<K, V> map, IteratorStart start, K key) {
_map = map;
SortedMap<K, V> _view;
_goingForward = true;
switch (start) {
case GE -> _view = _map.tailMap(key, true);
case GT -> _view = _map.tailMap(key, false);
case GE -> _view = map.tailMap(key, true);
case GT -> _view = map.tailMap(key, false);
case LE -> {
var floorKey = _map.floorKey(key);
var floorKey = map.floorKey(key);
if (floorKey == null) _view = _map;
else _view = _map.tailMap(floorKey, true);
else _view = map.tailMap(floorKey, true);
}
case LT -> {
var lowerKey = map.lowerKey(key);
if (lowerKey == null) _view = _map;
else _view = _map.tailMap(lowerKey, true);
else _view = map.tailMap(lowerKey, true);
}
default -> throw new IllegalArgumentException("Unknown start type");
}

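The switch above maps each IteratorStart mode onto a NavigableMap lookup: GE/GT become tailMap(key, inclusive/exclusive), while LE/LT step back one entry via floorKey/lowerKey. A quick check of where each mode lands (illustrative):

import java.util.TreeMap;

class IteratorStartSketch {
    public static void main(String[] args) {
        var map = new TreeMap<Integer, String>();
        map.put(1, "a");
        map.put(3, "b");
        map.put(5, "c");
        assert map.tailMap(3, true).firstKey() == 3;  // GE 3 -> cursor on 3
        assert map.tailMap(3, false).firstKey() == 5; // GT 3 -> cursor on 5
        assert map.floorKey(4) == 3;                  // LE 4 -> cursor on 3
        assert map.lowerKey(3) == 1;                  // LT 3 -> cursor on 1
    }
}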
@@ -1,27 +1,28 @@
package com.usatiuk.objects.iterators;

import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;

import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Function;

public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final MergingKvIterator<K, MaybeTombstone<V>> _backing;
private Pair<K, V> _next = null;
public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
private final CloseableKvIterator<K, V> _backing;
private final Function<V, V_T> _transformer;
private Pair<K, V_T> _next = null;
private boolean _checkedNext = false;

public TombstoneSkippingIterator(IteratorStart start, K startKey, List<CloseableKvIterator<K, MaybeTombstone<V>>> iterators) {
public PredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<V, V_T> transformer) {
_goingForward = true;
_backing = new MergingKvIterator<>(start, startKey, iterators);
_backing = backing;
_transformer = transformer;

if (start == IteratorStart.GE || start == IteratorStart.GT)
return;

boolean shouldGoBack = false;
if (canHaveNext())
tryFillNext();
fillNext();

boolean shouldGoBack = false;
if (start == IteratorStart.LE) {
if (_next == null || _next.getKey().compareTo(startKey) > 0) {
shouldGoBack = true;
@@ -38,27 +39,34 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
_backing.skipPrev();
fillNext();
_goingForward = true;
if (_next != null)
_backing.skip();
_backing.skip();
fillNext();
}


switch (start) {
case LT -> {
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
}
case LE -> {
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
}
case GT -> {
assert _next == null || _next.getKey().compareTo(startKey) > 0;
}
case GE -> {
assert _next == null || _next.getKey().compareTo(startKey) >= 0;
}
}
}

private boolean canHaveNext() {
return (_goingForward ? _backing.hasNext() : _backing.hasPrev());
}

private boolean tryFillNext() {
var next = _goingForward ? _backing.next() : _backing.prev();
if (next.getValue() instanceof Tombstone<?>)
return false;
_next = Pair.of(next.getKey(), ((Data<V>) next.getValue()).value());
return true;
}

private void fillNext() {
while (_next == null && canHaveNext()) {
tryFillNext();
while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) {
var next = _goingForward ? _backing.next() : _backing.prev();
var transformed = _transformer.apply(next.getValue());
if (transformed == null)
continue;
_next = Pair.of(next.getKey(), transformed);
}
_checkedNext = true;
}
@@ -73,6 +81,9 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
else if (!_goingForward && !wasAtEnd)
_backing.skipPrev();

if (!wasAtEnd)
Log.tracev("Skipped in reverse: {0}", _next);

_next = null;
_checkedNext = false;
}
@@ -107,7 +118,7 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
}

@Override
protected Pair<K, V> nextImpl() {
protected Pair<K, V_T> nextImpl() {
if (!_checkedNext)
fillNext();

@@ -127,6 +138,7 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
@Override
public String toString() {
return "PredicateKvIterator{" +
"_backing=" + _backing +
", _next=" + _next +
'}';
}
@@ -1,4 +1,10 @@
package com.usatiuk.objects.iterators;

public interface Tombstone<V> extends MaybeTombstone<V> {
import java.util.Optional;

public record Tombstone<V>() implements MaybeTombstone<V> {
@Override
public Optional<V> opt() {
return Optional.empty();
}
}

@@ -1,4 +0,0 @@
package com.usatiuk.objects.iterators;

public record TombstoneImpl<V>() implements Tombstone<V> {
}
@@ -0,0 +1,83 @@
package com.usatiuk.objects.iterators;

import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;

import java.util.List;

public class TombstoneMergingKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
private final CloseableKvIterator<K, V> _backing;
private final String _name;

public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, MaybeTombstone<V>>> iterators) {
_name = name;
_backing = new PredicateKvIterator<>(
new MergingKvIterator<>(name + "-merging", startType, startKey, iterators),
startType, startKey,
pair -> {
Log.tracev("{0} - Processing pair {1}", _name, pair);
if (pair instanceof Tombstone) {
return null;
}
return ((Data<V>) pair).value();
});
}

@SafeVarargs
public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, MaybeTombstone<V>>... iterators) {
this(name, startType, startKey, List.of(iterators));
}

@Override
public K peekNextKey() {
return _backing.peekNextKey();
}

@Override
public void skip() {
_backing.skip();
}

@Override
public K peekPrevKey() {
return _backing.peekPrevKey();
}

@Override
public Pair<K, V> prev() {
return _backing.prev();
}

@Override
public boolean hasPrev() {
return _backing.hasPrev();
}

@Override
public void skipPrev() {
_backing.skipPrev();
}

@Override
public void close() {
_backing.close();
}

@Override
public boolean hasNext() {
return _backing.hasNext();
}

@Override
public Pair<K, V> next() {
return _backing.next();
}

@Override
public String toString() {
return "TombstoneMergingKvIterator{" +
"_backing=" + _backing +
", _name='" + _name + '\'' +
'}';
}
}
@@ -1,18 +1,15 @@
package com.usatiuk.objects.snapshot;

import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MaybeTombstone;
import com.usatiuk.objects.iterators.Tombstone;
import com.usatiuk.utils.AutoCloseableNoThrow;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;

import javax.annotation.Nonnull;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;

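// A snapshot is a consistent, immutable read view of the store; iteration and
// point reads both observe the same state.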
public interface Snapshot<K extends Comparable<K>, V> extends AutoCloseableNoThrow {
List<CloseableKvIterator<K, MaybeTombstone<V>>> getIterator(IteratorStart start, K key);
CloseableKvIterator<K, V> getIterator(IteratorStart start, K key);

@Nonnull
Optional<V> readObject(K name);

@@ -0,0 +1,28 @@
package com.usatiuk.objects.snapshot;

import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.stores.WritebackObjectPersistentStore;
import com.usatiuk.objects.transaction.TxRecord;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;

import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Optional;
import java.util.function.Consumer;

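// Thin facade over the writeback store: snapshot creation and transaction
// commits are delegated directly.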
@ApplicationScoped
public class SnapshotManager {
@Inject
WritebackObjectPersistentStore writebackStore;

public Snapshot<JObjectKey, JDataVersionedWrapper> createSnapshot() {
return writebackStore.getSnapshot();
}

// This should not be called for the same objects concurrently
public Consumer<Runnable> commitTx(Collection<TxRecord.TxObjectRecord<?>> writes) {
return writebackStore.commitTx(writes);
}
}
@@ -1,11 +1,9 @@
package com.usatiuk.objects.stores;

import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JDataVersionedWrapperLazy;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
@@ -17,7 +15,6 @@ import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.pcollections.TreePMap;

import javax.annotation.Nonnull;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -26,11 +23,47 @@ import java.util.concurrent.atomic.AtomicReference;

@ApplicationScoped
public class CachingObjectPersistentStore {
private final AtomicReference<Cache> _cache;
@Inject
SerializingObjectPersistentStore delegate;
@ConfigProperty(name = "dhfs.objects.lru.print-stats")
boolean printStats;

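// Immutable cache state: withPut returns a copy with the entry added (a miss is
// cached as a tombstone) and evicts entries from the lowest key upward until the
// estimated total size fits under sizeLimit.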
private record Cache(TreePMap<JObjectKey, CacheEntry> map,
int size,
long version,
int sizeLimit) {
public Cache withPut(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
int objSize = obj.map(JDataVersionedWrapper::estimateSize).orElse(16);

int newSize = size() + objSize;
var entry = new CacheEntry(obj.<MaybeTombstone<JDataVersionedWrapper>>map(Data::new).orElse(new Tombstone<>()), objSize);

var old = map.get(key);
if (old != null)
newSize -= old.size();

TreePMap<JObjectKey, CacheEntry> newCache = map().plus(key, entry);

while (newSize > sizeLimit) {
var del = newCache.firstEntry();
newCache = newCache.minusFirstEntry();
newSize -= del.getValue().size();
}
return new Cache(
newCache,
newSize,
version,
sizeLimit
);
}

public Cache withVersion(long version) {
return new Cache(map, size, version, sizeLimit);
}
}

private final AtomicReference<Cache> _cache;
private ExecutorService _commitExecutor;
private ExecutorService _statusExecutor;
private AtomicLong _cached = new AtomicLong();
private AtomicLong _cacheTries = new AtomicLong();
@@ -46,6 +79,7 @@ public class CachingObjectPersistentStore {
_cache.set(_cache.get().withVersion(s.id()));
}

_commitExecutor = Executors.newSingleThreadExecutor();
if (printStats) {
_statusExecutor = Executors.newSingleThreadExecutor();
_statusExecutor.submit(() -> {
@@ -66,6 +100,7 @@ public class CachingObjectPersistentStore {
Log.tracev("Committing: {0} writes, {1} deletes", objs.written().size(), objs.deleted().size());

var cache = _cache.get();
var commitFuture = _commitExecutor.submit(() -> delegate.prepareTx(objs, txId).run());
for (var write : objs.written()) {
cache = cache.withPut(write.getLeft(), Optional.of(write.getRight()));
}
@@ -73,7 +108,11 @@ public class CachingObjectPersistentStore {
cache = cache.withPut(del, Optional.empty());
}
cache = cache.withVersion(txId);
delegate.commitTx(objs, txId);
try {
commitFuture.get();
} catch (Exception e) {
throw new RuntimeException(e);
}
_cache.set(cache);

Log.tracev("Committed: {0} writes, {1} deletes", objs.written().size(), objs.deleted().size());
@@ -101,12 +140,11 @@ public class CachingObjectPersistentStore {
Snapshot<JObjectKey, JDataVersionedWrapper> finalBacking = backing;
Cache finalCurCache = curCache;
return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
private boolean _invalid = false;
private final Cache _curCache = finalCurCache;
private final Snapshot<JObjectKey, JDataVersionedWrapper> _backing = finalBacking;
private boolean _invalid = false;
private boolean _closed = false;

private void doCache(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
private void maybeCache(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
_cacheTries.incrementAndGet();
if (_invalid)
return;
@@ -122,36 +160,18 @@ public class CachingObjectPersistentStore {
_cached.incrementAndGet();
}

private void maybeCache(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
if (obj.isEmpty()) {
doCache(key, obj);
return;
}

var wrapper = obj.get();

if (!(wrapper instanceof JDataVersionedWrapperLazy lazy)) {
doCache(key, obj);
return;
}

lazy.setCacheCallback(() -> {
if (_closed) {
Log.error("Cache callback called after close");
System.exit(-1);
}
doCache(key, obj);
});
return;
}

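// Iteration overlays the in-memory cache on top of the backing snapshot; values
// pulled from the backing store are cached as a side effect by CachingKvIterator.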
@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
return ListUtils.prependAndMap(
new NavigableMapKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>(_curCache.map(), start, key),
_backing.getIterator(start, key),
i -> new CachingKvIterator((CloseableKvIterator<JObjectKey, JDataVersionedWrapper>) (CloseableKvIterator<JObjectKey, ?>) i)
);
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
return new TombstoneMergingKvIterator<>("cache", start, key,
(mS, mK)
-> new MappingKvIterator<>(
new NavigableMapKvIterator<>(_curCache.map(), mS, mK),
e -> {
// Log.tracev("Taken from cache: {0}", e);
return e.object();
}
),
(mS, mK) -> new MappingKvIterator<>(new CachingKvIterator(_backing.getIterator(start, key)), Data::new));
}

@Nonnull
@@ -159,12 +179,12 @@ public class CachingObjectPersistentStore {
public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
var cached = _curCache.map().get(name);
if (cached != null) {
return switch (cached) {
case CacheEntryPresent data -> Optional.of(data.value());
case CacheEntryMiss tombstone -> {
return switch (cached.object()) {
case Data<JDataVersionedWrapper> data -> Optional.of(data.value());
case Tombstone<JDataVersionedWrapper> tombstone -> {
yield Optional.empty();
}
default -> throw new IllegalStateException("Unexpected value: " + cached);
default -> throw new IllegalStateException("Unexpected value: " + cached.object());
};
}
var read = _backing.readObject(name);
@@ -179,11 +199,10 @@ public class CachingObjectPersistentStore {

@Override
public void close() {
_closed = true;
_backing.close();
}

private class CachingKvIterator implements CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> {
private class CachingKvIterator implements CloseableKvIterator<JObjectKey, JDataVersionedWrapper> {
private final CloseableKvIterator<JObjectKey, JDataVersionedWrapper> _delegate;

private CachingKvIterator(CloseableKvIterator<JObjectKey, JDataVersionedWrapper> delegate) {
@@ -216,10 +235,10 @@ public class CachingObjectPersistentStore {
}

@Override
public Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> prev() {
public Pair<JObjectKey, JDataVersionedWrapper> prev() {
var prev = _delegate.prev();
maybeCache(prev.getKey(), Optional.of(prev.getValue()));
return (Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (Pair<JObjectKey, ?>) prev;
return prev;
}

@Override
@@ -233,10 +252,10 @@ public class CachingObjectPersistentStore {
}

@Override
public Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> next() {
public Pair<JObjectKey, JDataVersionedWrapper> next() {
var next = _delegate.next();
maybeCache(next.getKey(), Optional.of(next.getValue()));
return (Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (Pair<JObjectKey, ?>) next;
return next;
}
}
};
@@ -249,53 +268,6 @@ public class CachingObjectPersistentStore {
}
}

private interface CacheEntry extends MaybeTombstone<JDataVersionedWrapper> {
int size();
}

private record Cache(TreePMap<JObjectKey, CacheEntry> map,
int size,
long version,
int sizeLimit) {
public Cache withPut(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
var entry = obj.<CacheEntry>map(o -> new CacheEntryPresent(o, o.estimateSize())).orElse(new CacheEntryMiss());

int newSize = size() + entry.size();

var old = map.get(key);
if (old != null)
newSize -= old.size();

TreePMap<JObjectKey, CacheEntry> newCache = map();

while (newSize > sizeLimit) {
var del = newCache.firstEntry();
newCache = newCache.minusFirstEntry();
newSize -= del.getValue().size();
}

newCache = newCache.plus(key, entry);
return new Cache(
newCache,
newSize,
version,
sizeLimit
);
}

public Cache withVersion(long version) {
return new Cache(map, size, version, sizeLimit);
}
}

private record CacheEntryPresent(JDataVersionedWrapper value,
int size) implements CacheEntry, Data<JDataVersionedWrapper> {
}

private record CacheEntryMiss() implements CacheEntry, Tombstone<JDataVersionedWrapper> {
@Override
public int size() {
return 64;
}
private record CacheEntry(MaybeTombstone<JDataVersionedWrapper> object, int size) {
}
}

@@ -1,9 +1,15 @@
package com.usatiuk.objects.stores;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import com.usatiuk.dhfs.utils.RefcountedCloseable;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyMax;
import com.usatiuk.objects.JObjectKeyMin;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.KeyPredicateKvIterator;
import com.usatiuk.objects.iterators.ReversibleKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import io.quarkus.arc.properties.IfBuildProperty;
import io.quarkus.logging.Log;
@@ -23,10 +29,9 @@ import java.lang.ref.Cleaner;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.List;
import java.util.Arrays;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.stream.Stream;

import static org.lmdbjava.DbiFlags.MDB_CREATE;
import static org.lmdbjava.Env.create;
@@ -35,20 +40,7 @@ import static org.lmdbjava.Env.create;
@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "lmdb")
public class LmdbObjectPersistentStore implements ObjectPersistentStore {
private static final String DB_NAME = "objects";
private static final String DB_VER_OBJ_NAME_STR = "__DB_VER_OBJ";
private static final ByteBuffer DB_VER_OBJ_NAME;

@ConfigProperty(name = "dhfs.objects.persistence.lmdb.size", defaultValue = "1000000000000")
long lmdbSize;

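// The last committed transaction id is persisted under the reserved __DB_VER_OBJ
// key in the same database; snapshot iterators below filter this key out.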
static {
byte[] tmp = DB_VER_OBJ_NAME_STR.getBytes(StandardCharsets.ISO_8859_1);
var bb = ByteBuffer.allocateDirect(tmp.length);
bb.put(tmp);
bb.flip();
DB_VER_OBJ_NAME = bb.asReadOnlyBuffer();
}

private static final byte[] DB_VER_OBJ_NAME = "__DB_VER_OBJ".getBytes(StandardCharsets.UTF_8);
private final Path _root;
private Env<ByteBuffer> _env;
private Dbi<ByteBuffer> _db;
@@ -64,7 +56,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
_root.toFile().mkdirs();
}
_env = create()
.setMapSize(lmdbSize)
.setMapSize(1_000_000_000_000L)
.setMaxDbs(1)
.open(_root.toFile(), EnvFlags.MDB_NOTLS);
_db = _env.openDbi(DB_NAME, MDB_CREATE);
@@ -74,10 +66,13 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
if (read.isPresent()) {
Log.infov("Read tx id {0}", read.get());
} else {
var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
bb.put(DB_VER_OBJ_NAME);
bb.flip();
var bbData = ByteBuffer.allocateDirect(8);
bbData.putLong(0);
bbData.flip();
_db.put(txn, DB_VER_OBJ_NAME.asReadOnlyBuffer(), bbData);
_db.put(txn, bb, bbData);
txn.commit();
}
}
@@ -86,7 +81,10 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}

private Optional<Long> readTxId(Txn<ByteBuffer> txn) {
var value = _db.get(txn, DB_VER_OBJ_NAME.asReadOnlyBuffer());
var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
bb.put(DB_VER_OBJ_NAME);
bb.flip();
var value = _db.get(txn, bb);
return Optional.ofNullable(value).map(ByteBuffer::getLong);
}

@@ -100,57 +98,66 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
if (!_ready) throw new IllegalStateException("Wrong service order!");
}

@Nonnull
@Override
public Snapshot<JObjectKey, ByteBuffer> getSnapshot() {
var txn = _env.txnRead();
try {
long commitId = readTxId(txn).orElseThrow();
return new Snapshot<JObjectKey, ByteBuffer>() {
private final Txn<ByteBuffer> _txn = txn;
private final long _id = commitId;
private boolean _closed = false;

@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>>> getIterator(IteratorStart start, JObjectKey key) {
assert !_closed;
return List.of(new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn, start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR)));
}

@Nonnull
@Override
public Optional<ByteBuffer> readObject(JObjectKey name) {
assert !_closed;
var got = _db.get(_txn, name.toByteBuffer());
var ret = Optional.ofNullable(got).map(ByteBuffer::asReadOnlyBuffer);
return ret;
}

@Override
public long id() {
assert !_closed;
return _id;
}

@Override
public void close() {
assert !_closed;
_closed = true;
_txn.close();
}
};
} catch (Exception e) {
txn.close();
throw e;
public Optional<ByteString> readObject(JObjectKey name) {
verifyReady();
try (Txn<ByteBuffer> txn = _env.txnRead()) {
var value = _db.get(txn, name.toByteBuffer());
return Optional.ofNullable(value).map(ByteString::copyFrom);
}
}

@Override
public void commitTx(TxManifestRaw names, long txId) {
public Snapshot<JObjectKey, ByteString> getSnapshot() {
var txn = new RefcountedCloseable<>(_env.txnRead());
long commitId = readTxId(txn.get()).orElseThrow();
return new Snapshot<JObjectKey, ByteString>() {
private final RefcountedCloseable<Txn<ByteBuffer>> _txn = txn;
private final long _id = commitId;
private boolean _closed = false;

@Override
public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
assert !_closed;
return new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn.ref(), start, key), start, key, (k) -> !Arrays.equals(k.name().getBytes(StandardCharsets.UTF_8), DB_VER_OBJ_NAME));
}

@Nonnull
@Override
public Optional<ByteString> readObject(JObjectKey name) {
assert !_closed;
var got = _db.get(_txn.get(), name.toByteBuffer());
var ret = Optional.ofNullable(got).map(ByteString::copyFrom);
return ret;
}

@Override
public long id() {
assert !_closed;
return _id;
}

@Override
public void close() {
assert !_closed;
_closed = true;
_txn.unref();
}
};
}

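// prepareTx stages all writes and deletes in an open LMDB write transaction and
// bumps the stored tx id; the returned Runnable performs the actual commit, so
// the caller decides when the transaction becomes durable.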
@Override
public Runnable prepareTx(TxManifestRaw names, long txId) {
verifyReady();
try (var txn = _env.txnWrite()) {
var txn = _env.txnWrite();
try {
for (var written : names.written()) {
var putBb = _db.reserve(txn, written.getKey().toByteBuffer(), written.getValue().size());
written.getValue().copyTo(putBb);
// TODO:
var bb = UninitializedByteBuffer.allocateUninitialized(written.getValue().size());
bb.put(written.getValue().asReadOnlyByteBuffer());
bb.flip();
_db.put(txn, written.getKey().toByteBuffer(), bb);
}
for (JObjectKey key : names.deleted()) {
_db.delete(txn, key.toByteBuffer());
@@ -158,12 +165,24 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {

assert txId > readTxId(txn).orElseThrow();

var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
bb.put(DB_VER_OBJ_NAME);
bb.flip();
var bbData = ByteBuffer.allocateDirect(8);
bbData.putLong(txId);
bbData.flip();
_db.put(txn, DB_VER_OBJ_NAME.asReadOnlyBuffer(), bbData);
txn.commit();
_db.put(txn, bb, bbData);
} catch (Throwable t) {
txn.close();
throw t;
}
return () -> {
try {
txn.commit();
} finally {
txn.close();
}
};
}

@Override
@@ -184,30 +203,29 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
return _root.toFile().getUsableSpace();
}

private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>> {
private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, ByteString> {
private static final Cleaner CLEANER = Cleaner.create();
private final Txn<ByteBuffer> _txn; // Managed by the snapshot
private final RefcountedCloseable<Txn<ByteBuffer>> _txn;
private final Cursor<ByteBuffer> _cursor;
private final MutableObject<Boolean> _closed = new MutableObject<>(false);
// private final Exception _allocationStacktrace = new Exception();
// private final Exception _allocationStacktrace = null;
private final Exception _allocationStacktrace = null;
private boolean _hasNext = false;
private JObjectKey _peekedNextKey = null;

LmdbKvIterator(Txn<ByteBuffer> txn, IteratorStart start, JObjectKey key) {
LmdbKvIterator(RefcountedCloseable<Txn<ByteBuffer>> txn, IteratorStart start, JObjectKey key) {
_txn = txn;
_goingForward = true;

_cursor = _db.openCursor(_txn);
_cursor = _db.openCursor(_txn.get());

var closedRef = _closed;
// var bt = _allocationStacktrace;
// CLEANER.register(this, () -> {
// if (!closedRef.getValue()) {
// Log.error("Iterator was not closed before GC, allocated at: {0}", bt);
// System.exit(-1);
// }
// });
var bt = _allocationStacktrace;
CLEANER.register(this, () -> {
if (!closedRef.getValue()) {
Log.error("Iterator was not closed before GC, allocated at: {0}", bt);
System.exit(-1);
}
});

verifyReady();

@@ -262,24 +280,24 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
}

// var realGot = JObjectKey.fromByteBuffer(_cursor.key());
// _cursor.key().flip();
//
// switch (start) {
// case LT -> {
//// assert !_hasNext || realGot.compareTo(key) < 0;
// }
// case LE -> {
//// assert !_hasNext || realGot.compareTo(key) <= 0;
// }
// case GT -> {
// assert !_hasNext || realGot.compareTo(key) > 0;
// }
// case GE -> {
// assert !_hasNext || realGot.compareTo(key) >= 0;
// }
// }
// Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
var realGot = JObjectKey.fromByteBuffer(_cursor.key());
_cursor.key().flip();

switch (start) {
case LT -> {
// assert !_hasNext || realGot.compareTo(key) < 0;
}
case LE -> {
// assert !_hasNext || realGot.compareTo(key) <= 0;
}
case GT -> {
assert !_hasNext || realGot.compareTo(key) > 0;
}
case GE -> {
assert !_hasNext || realGot.compareTo(key) >= 0;
}
}
Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
}

@Override
@@ -289,6 +307,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
_closed.setValue(true);
_cursor.close();
_txn.unref();
}

@Override
@@ -307,7 +326,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
}
_goingForward = !_goingForward;
_peekedNextKey = null;
}

@Override
@@ -315,12 +333,8 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
if (_peekedNextKey != null) {
return _peekedNextKey;
}
var ret = JObjectKey.fromByteBuffer(_cursor.key());
_cursor.key().flip();
_peekedNextKey = ret;
return ret;
}

@@ -330,7 +344,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
_hasNext = _cursor.next();
else
_hasNext = _cursor.prev();
_peekedNextKey = null;
}

@Override
@@ -339,19 +352,23 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}

@Override
protected Pair<JObjectKey, MaybeTombstone<ByteBuffer>> nextImpl() {
protected Pair<JObjectKey, ByteString> nextImpl() {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
// TODO: Right now with java serialization it doesn't matter, it's all copied to arrays anyway
var val = _cursor.val();
Pair<JObjectKey, MaybeTombstone<ByteBuffer>> ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), new DataWrapper<>(val.asReadOnlyBuffer()));
// var val = _cursor.val();
// var bbDirect = UninitializedByteBuffer.allocateUninitialized(val.remaining());
// bbDirect.put(val);
// bbDirect.flip();
// var bs = UnsafeByteOperations.unsafeWrap(bbDirect);
// var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), bs);
var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), ByteString.copyFrom(_cursor.val()));
if (_goingForward)
_hasNext = _cursor.next();
else
_hasNext = _cursor.prev();
// Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
_peekedNextKey = null;
Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
return ret;
}
}

@@ -2,18 +2,18 @@ package com.usatiuk.objects.stores;

import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.JObjectKeyImpl;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.NavigableMapKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import io.quarkus.arc.properties.IfBuildProperty;
import jakarta.enterprise.context.ApplicationScoped;
import org.pcollections.TreePMap;

import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Stream;

@ApplicationScoped
@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory")
@@ -22,22 +22,30 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
private TreePMap<JObjectKey, ByteString> _objects = TreePMap.empty();
private long _lastCommitId = 0;

@Nonnull
@Override
public Snapshot<JObjectKey, ByteBuffer> getSnapshot() {
public Optional<ByteString> readObject(JObjectKey name) {
synchronized (this) {
return new Snapshot<JObjectKey, ByteBuffer>() {
return Optional.ofNullable(_objects.get(name));
}
}

@Override
public Snapshot<JObjectKey, ByteString> getSnapshot() {
synchronized (this) {
return new Snapshot<JObjectKey, ByteString>() {
private final TreePMap<JObjectKey, ByteString> _objects = MemoryObjectPersistentStore.this._objects;
private final long _lastCommitId = MemoryObjectPersistentStore.this._lastCommitId;

@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>>> getIterator(IteratorStart start, JObjectKey key) {
return List.of(new MappingKvIterator<>(new NavigableMapKvIterator<>(_objects, start, key), s -> new DataWrapper<>(s.asReadOnlyByteBuffer())));
public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
return new NavigableMapKvIterator<>(_objects, start, key);
}

@Nonnull
@Override
public Optional<ByteBuffer> readObject(JObjectKey name) {
return Optional.ofNullable(_objects.get(name)).map(ByteString::asReadOnlyByteBuffer);
public Optional<ByteString> readObject(JObjectKey name) {
return Optional.ofNullable(_objects.get(name));
}

@Override
@@ -53,18 +61,19 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
}
}

@Override
public void commitTx(TxManifestRaw names, long txId) {
synchronized (this) {
for (var written : names.written()) {
_objects = _objects.plus(written.getKey(), written.getValue());
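// The returned Runnable applies all writes and deletes to the persistent map
// under synchronization and advances _lastCommitId when executed.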
public Runnable prepareTx(TxManifestRaw names, long txId) {
return () -> {
synchronized (this) {
for (var written : names.written()) {
_objects = _objects.plus(written.getKey(), written.getValue());
}
for (JObjectKey key : names.deleted()) {
_objects = _objects.minus(key);
}
assert txId > _lastCommitId;
_lastCommitId = txId;
}
for (JObjectKey key : names.deleted()) {
_objects = _objects.minus(key);
}
assert txId > _lastCommitId;
_lastCommitId = txId;
}
};
}

@Override

@@ -2,18 +2,23 @@ package com.usatiuk.objects.stores;

import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.snapshot.Snapshot;

import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.Optional;
import java.util.function.Consumer;

// Persistent storage of objects
// All changes are written as sequential transactions
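// getSnapshot() provides a consistent read view; prepareTx() stages a
// transaction and returns a Runnable that commits it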
public interface ObjectPersistentStore {
Snapshot<JObjectKey, ByteBuffer> getSnapshot();
@Nonnull
Optional<ByteString> readObject(JObjectKey name);

void commitTx(TxManifestRaw names, long txId);
Snapshot<JObjectKey, ByteString> getSnapshot();

Runnable prepareTx(TxManifestRaw names, long txId);

long getTotalSpace();


@@ -1,9 +1,6 @@
package com.usatiuk.objects.stores;

import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.Tombstone;

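// Pending writeback entries: in the tombstone-based variant these double as
// Data/Tombstone so the pending-write map can be merged directly with
// snapshot iterators.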
public record PendingDelete(JObjectKey key,
long bundleId) implements PendingWriteEntry, Tombstone<JDataVersionedWrapper> {
public record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry {
}

@@ -1,8 +1,6 @@
package com.usatiuk.objects.stores;

import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.iterators.Data;

public record PendingWrite(JDataVersionedWrapper value,
long bundleId) implements PendingWriteEntry, Data<JDataVersionedWrapper> {
public record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry {
}

@@ -1,8 +1,5 @@
package com.usatiuk.objects.stores;

import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.iterators.MaybeTombstone;

public interface PendingWriteEntry extends MaybeTombstone<JDataVersionedWrapper> {
public interface PendingWriteEntry {
long bundleId();
}

@@ -0,0 +1,280 @@
package com.usatiuk.objects.stores;

import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyMax;
import com.usatiuk.objects.JObjectKeyMin;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.ReversibleKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import io.quarkus.arc.properties.IfBuildProperty;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.rocksdb.*;

import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.NoSuchElementException;
import java.util.Optional;

@ApplicationScoped
@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "rocks")
public class RocksDbObjectPersistentStore implements ObjectPersistentStore {
private static final String DB_NAME = "objects";
private static final byte[] DB_VER_OBJ_NAME = "__DB_VER_OBJ".getBytes(StandardCharsets.UTF_8);
private final Path _root;
private Options _options;
private TransactionDBOptions _transactionDBOptions;
private TransactionDB _db;
private boolean _ready = false;

public RocksDbObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) {
_root = Path.of(root).resolve("objects");
}

void init(@Observes @Priority(100) StartupEvent event) throws RocksDBException {
if (!_root.toFile().exists()) {
Log.info("Initializing with root " + _root);
_root.toFile().mkdirs();
}

RocksDB.loadLibrary();

_options = new Options().setCreateIfMissing(true);
_transactionDBOptions = new TransactionDBOptions();
_db = TransactionDB.open(_options, _transactionDBOptions, _root.toString());

try (var txn = _db.beginTransaction(new WriteOptions())) {
var read = readTxId(txn);
if (read.isPresent()) {
Log.infov("Read tx id {0}", read.get());
} else {
txn.put(DB_VER_OBJ_NAME, ByteBuffer.allocate(8).putLong(0).array());
txn.commit();
}
}

_ready = true;
}

private Optional<Long> readTxId(Transaction txn) throws RocksDBException {
var value = txn.get(new ReadOptions(), DB_VER_OBJ_NAME);
return Optional.ofNullable(value).map(ByteBuffer::wrap).map(ByteBuffer::getLong);
}

void shutdown(@Observes @Priority(900) ShutdownEvent event) {
_ready = false;
_db.close();
}

private void verifyReady() {
if (!_ready) throw new IllegalStateException("Wrong service order!");
}

@Nonnull
@Override
public Optional<ByteString> readObject(JObjectKey name) {
verifyReady();
byte[] got = null;
try {
got = _db.get(new ReadOptions(), name.bytes());
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
return Optional.ofNullable(got).map(ByteString::copyFrom);
}

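// Snapshots are built from a RocksDB transaction pinned to an explicit snapshot,
// so point reads and iterators both see the same committed state; the snapshot
// id is the tx id stored under __DB_VER_OBJ.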
@Override
public Snapshot<JObjectKey, ByteString> getSnapshot() {
var txn = _db.beginTransaction(new WriteOptions());
txn.setSnapshot();
var rocksDbSnapshot = txn.getSnapshot();
long commitId = 0;
try {
commitId = readTxId(txn).orElseThrow();
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
long finalCommitId = commitId;
return new Snapshot<JObjectKey, ByteString>() {
private final Transaction _txn = txn;
private final long _id = finalCommitId;
private final org.rocksdb.Snapshot _rocksDbSnapshot = rocksDbSnapshot;
private boolean _closed = false;

@Override
public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
assert !_closed;
return new RocksDbKvIterator(_txn, start, key, _rocksDbSnapshot);
}

@Nonnull
@Override
public Optional<ByteString> readObject(JObjectKey name) {
assert !_closed;
try (var readOptions = new ReadOptions().setSnapshot(_rocksDbSnapshot)) {
var got = _txn.get(readOptions, name.bytes());
return Optional.ofNullable(got).map(ByteString::copyFrom);
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
}

@Override
public long id() {
assert !_closed;
return _id;
}

@Override
public void close() {
assert !_closed;
_closed = true;
_txn.close();
}
};
}

@Override
public Runnable prepareTx(TxManifestRaw names, long txId) {
verifyReady();
var txn = _db.beginTransaction(new WriteOptions());
try {
for (var written : names.written()) {
txn.put(written.getKey().bytes(), written.getValue().toByteArray());
}
for (JObjectKey key : names.deleted()) {
txn.delete(key.bytes());
}

assert txId > readTxId(txn).orElseThrow();

txn.put(DB_VER_OBJ_NAME, ByteBuffer.allocate(8).putLong(txId).array());
} catch (Throwable t) {
txn.close();
throw new RuntimeException(t);
}
return () -> {
try {
txn.commit();
} catch (RocksDBException e) {
throw new RuntimeException(e);
} finally {
txn.close();
}
};
}

@Override
public long getTotalSpace() {
verifyReady();
return _root.toFile().getTotalSpace();
}

@Override
public long getFreeSpace() {
verifyReady();
return _root.toFile().getFreeSpace();
}

@Override
public long getUsableSpace() {
verifyReady();
return _root.toFile().getUsableSpace();
}

private class RocksDbKvIterator extends ReversibleKvIterator<JObjectKey, ByteString> {
private final RocksIterator _iterator;
private final org.rocksdb.Snapshot _rocksDbSnapshot;
private final ReadOptions _readOptions;
private boolean _hasNext;

RocksDbKvIterator(Transaction txn, IteratorStart start, JObjectKey key, org.rocksdb.Snapshot rocksDbSnapshot) {
_rocksDbSnapshot = rocksDbSnapshot;
_readOptions = new ReadOptions().setSnapshot(_rocksDbSnapshot);
_iterator = txn.getIterator(_readOptions);
verifyReady();

if (key instanceof JObjectKeyMin) {
_iterator.seekToFirst();
} else if (key instanceof JObjectKeyMax) {
_iterator.seekToLast();
} else {
_iterator.seek(key.bytes());
}
_hasNext = _iterator.isValid();
}

@Override
public void close() {
_iterator.close();
}

@Override
protected void reverse() {
if (_hasNext) {
if (_goingForward) {
_iterator.prev();
} else {
_iterator.next();
}
} else {
if (_goingForward) {
_iterator.seekToLast();
} else {
_iterator.seekToFirst();
}
}
_goingForward = !_goingForward;
_hasNext = _iterator.isValid();
}

@Override
protected JObjectKey peekImpl() {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
return JObjectKey.fromByteBuffer(ByteBuffer.wrap(_iterator.key()));
}

@Override
protected void skipImpl() {
if (_goingForward) {
_iterator.next();
} else {
_iterator.prev();
}
_hasNext = _iterator.isValid();
}

@Override
protected boolean hasImpl() {
return _hasNext;
}

@Override
protected Pair<JObjectKey, ByteString> nextImpl() {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
var key = JObjectKey.fromByteBuffer(ByteBuffer.wrap(_iterator.key()));
var value = ByteString.copyFrom(_iterator.value());
if (_goingForward) {
_iterator.next();
} else {
_iterator.prev();
}
_hasNext = _iterator.isValid();
return Pair.of(key, value);
}
}
}
@@ -1,38 +1,37 @@
package com.usatiuk.objects.stores;

import com.google.protobuf.ByteString;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JDataVersionedWrapperSerializer;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.JObjectKeyImpl;
import com.usatiuk.objects.ObjectSerializer;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MappingKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.utils.ListUtils;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;

import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;

@ApplicationScoped
public class SerializingObjectPersistentStore {
@Inject
JDataVersionedWrapperSerializer serializer;
ObjectSerializer<JDataVersionedWrapper> serializer;

@Inject
ObjectPersistentStore delegateStore;


public Snapshot<JObjectKey, JDataVersionedWrapper> getSnapshot() {
return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
private final Snapshot<JObjectKey, ByteBuffer> _backing = delegateStore.getSnapshot();
private final Snapshot<JObjectKey, ByteString> _backing = delegateStore.getSnapshot();

@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
return ListUtils.map(_backing.getIterator(start, key),
i -> new MappingKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>, MaybeTombstone<JDataVersionedWrapper>>(i,
d -> serializer.deserialize(((DataWrapper<ByteBuffer>) d).value())));
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
return new MappingKvIterator<>(_backing.getIterator(start, key), d -> serializer.deserialize(d));
}

@Nonnull
@@ -56,13 +55,13 @@ public class SerializingObjectPersistentStore {

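// Serializes every written object eagerly so the delegate store only ever sees
// raw bytes keyed by JObjectKey.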
private TxManifestRaw prepareManifest(TxManifestObj<? extends JDataVersionedWrapper> objs) {
return new TxManifestRaw(
objs.written().parallelStream()
objs.written().stream()
.map(e -> Pair.of(e.getKey(), serializer.serialize(e.getValue())))
.toList()
, objs.deleted());
}

void commitTx(TxManifestObj<? extends JDataVersionedWrapper> objects, long txId) {
delegateStore.commitTx(prepareManifest(objects), txId);
Runnable prepareTx(TxManifestObj<? extends JDataVersionedWrapper> objects, long txId) {
return delegateStore.prepareTx(prepareManifest(objects), txId);
}
}

@@ -3,13 +3,11 @@ package com.usatiuk.objects.stores;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JDataVersionedWrapperImpl;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.MaybeTombstone;
import com.usatiuk.objects.iterators.NavigableMapKvIterator;
import com.usatiuk.objects.JObjectKeyImpl;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.transaction.TxCommitException;
import com.usatiuk.objects.transaction.TxRecord;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
@@ -29,38 +27,33 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;

@ApplicationScoped
public class WritebackObjectPersistentStore {
@Inject
CachingObjectPersistentStore cachedStore;
@Inject
ExecutorService _callbackExecutor;
private final LinkedList<TxBundle> _pendingBundles = new LinkedList<>();
private final LinkedHashMap<Long, TxBundle> _notFlushedBundles = new LinkedHashMap<>();

@ConfigProperty(name = "dhfs.objects.writeback.limit")
int sizeLimit;

private TxBundle _pendingBundle = null;
private int _curSize = 0;
private record PendingWriteData(TreePMap<JObjectKey, PendingWriteEntry> pendingWrites,
long lastFlushedId,
long lastCommittedId) {
}

private final AtomicReference<PendingWriteData> _pendingWrites = new AtomicReference<>(null);

private final ReentrantLock _pendingBundleLock = new ReentrantLock();
private final Object _flushWaitSynchronizer = new Object();

private final Condition _newBundleCondition = _pendingBundleLock.newCondition();
private final Condition _flushCondition = _pendingBundleLock.newCondition();

private final AtomicLong _lastFlushedId = new AtomicLong(-1);
private final AtomicLong _lastCommittedId = new AtomicLong(-1);
private final AtomicLong _lastWrittenId = new AtomicLong(-1);
private final AtomicLong _lastCommittedId = new AtomicLong();

private final AtomicLong _waitedTotal = new AtomicLong(0);

@Inject
CachingObjectPersistentStore cachedStore;
@ConfigProperty(name = "dhfs.objects.writeback.limit")
long sizeLimit;
private long currentSize = 0;
private ExecutorService _writebackExecutor;
private ExecutorService _statusExecutor;

private volatile boolean _ready = false;

void init(@Observes @Priority(120) StartupEvent event) {
@@ -78,8 +71,8 @@ public class WritebackObjectPersistentStore {
try {
while (true) {
Thread.sleep(1000);
if (_curSize > 0)
Log.info("Tx commit status: size=" + _curSize / 1024 / 1024 + "MB");
if (currentSize > 0)
Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB");
}
} catch (InterruptedException ignored) {
}
@@ -89,7 +82,6 @@ public class WritebackObjectPersistentStore {
lastTxId = s.id();
}
_lastCommittedId.set(lastTxId);
_lastFlushedId.set(lastTxId);
_pendingWrites.set(new PendingWriteData(TreePMap.empty(), lastTxId, lastTxId));
_ready = true;
}
@@ -97,14 +89,11 @@ public class WritebackObjectPersistentStore {
void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException {
Log.info("Waiting for all transactions to drain");

_ready = false;
_pendingBundleLock.lock();
try {
while (_curSize > 0) {
_flushCondition.await();
synchronized (_flushWaitSynchronizer) {
_ready = false;
while (currentSize > 0) {
_flushWaitSynchronizer.wait();
}
} finally {
_pendingBundleLock.unlock();
}

_writebackExecutor.shutdownNow();
@@ -118,19 +107,21 @@ public class WritebackObjectPersistentStore {
private void writeback() {
while (!Thread.interrupted()) {
try {
TxBundle bundle;
_pendingBundleLock.lock();
try {
while (_pendingBundle == null)
_newBundleCondition.await();
bundle = _pendingBundle;
_pendingBundle = null;
TxBundle bundle = new TxBundle(0);
synchronized (_pendingBundles) {
while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready)
_pendingBundles.wait();

_curSize -= bundle.size();
assert _curSize == 0;
_flushCondition.signal();
} finally {
_pendingBundleLock.unlock();
long diff = 0;
while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var toCompress = _pendingBundles.poll();
diff -= toCompress.calculateTotalSize();
bundle.compress(toCompress);
}
diff += bundle.calculateTotalSize();
synchronized (_flushWaitSynchronizer) {
currentSize += diff;
}
}

var toWrite = new ArrayList<Pair<JObjectKey, JDataVersionedWrapper>>();
@@ -139,45 +130,57 @@ public class WritebackObjectPersistentStore {
for (var e : bundle._entries.values()) {
switch (e) {
case TxBundle.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> {
Log.tracev("Writing new {0}", key);
Log.trace("Writing new " + key);
toWrite.add(Pair.of(key, data));
}
case TxBundle.DeletedEntry(JObjectKey key) -> {
Log.tracev("Deleting from persistent storage {0}", key);
Log.trace("Deleting from persistent storage " + key);
toDelete.add(key);
}
default -> throw new IllegalStateException("Unexpected value: " + e);
}
}

cachedStore.commitTx(new TxManifestObj<>(toWrite, toDelete), bundle.id());
cachedStore.commitTx(
new TxManifestObj<>(
Collections.unmodifiableList(toWrite),
Collections.unmodifiableList(toDelete)
), bundle.getId());

Log.tracev("Bundle {0} committed", bundle.id());
Log.trace("Bundle " + bundle.getId() + " committed");

_pendingBundleLock.lock();
try {
while (true) {
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var e : bundle._entries.values()) {
var cur = curPwMap.get(e.key());
if (cur.bundleId() <= bundle.id())
if (cur.bundleId() <= bundle.getId())
curPwMap = curPwMap.minus(e.key());
}
var newCurPw = new PendingWriteData(
curPwMap,
bundle.id(),
bundle.getId(),
curPw.lastCommittedId()
);
_pendingWrites.compareAndSet(curPw, newCurPw);
} finally {
_pendingBundleLock.unlock();
if (_pendingWrites.compareAndSet(curPw, newCurPw))
break;
}

_lastFlushedId.set(bundle.id());
var callbacks = bundle.callbacks();
_callbackExecutor.submit(() -> {
callbacks.forEach(Runnable::run);
});
List<List<Runnable>> callbacks = new ArrayList<>();
synchronized (_notFlushedBundles) {
_lastWrittenId.set(bundle.getId());
while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) {
callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted());
}
}
callbacks.forEach(l -> l.forEach(Runnable::run));

synchronized (_flushWaitSynchronizer) {
currentSize -= bundle.calculateTotalSize();
// FIXME:
if (currentSize <= sizeLimit || !_ready)
_flushWaitSynchronizer.notifyAll();
}
} catch (InterruptedException ignored) {
} catch (Exception e) {
Log.error("Uncaught exception in writeback", e);
@@ -188,99 +191,125 @@ public class WritebackObjectPersistentStore {
Log.info("Writeback thread exiting");
}

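// commitBundle folds a transaction's writes and deletes into a bundle with a
// freshly allocated id, publishes them to the pending-write map so new iterators
// see them immediately, and wakes the writeback thread; callers block here when
// the writeback queue exceeds sizeLimit.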
private long commitBundle(Collection<TxRecord.TxObjectRecord<?>> writes) {
|
||||
public long commitBundle(Collection<TxRecord.TxObjectRecord<?>> writes) {
|
||||
verifyReady();
|
||||
_pendingBundleLock.lock();
|
||||
try {
|
||||
boolean shouldWake = false;
|
||||
if (_curSize > sizeLimit) {
|
||||
shouldWake = true;
|
||||
long started = System.currentTimeMillis();
|
||||
while (_curSize > sizeLimit)
|
||||
_flushCondition.await();
|
||||
long waited = System.currentTimeMillis() - started;
|
||||
_waitedTotal.addAndGet(waited);
|
||||
if (Log.isTraceEnabled())
|
||||
Log.tracev("Thread {0} waited for tx bundle for {1} ms", Thread.currentThread().getName(), waited);
|
||||
}
|
||||
|
||||
var oursId = _lastCommittedId.incrementAndGet();
|
||||
|
||||
var curBundle = _pendingBundle;
|
||||
int oldSize = 0;
|
||||
if (curBundle != null) {
|
||||
oldSize = curBundle.size();
|
||||
curBundle.setId(oursId);
|
||||
} else {
|
||||
curBundle = new TxBundle(oursId);
|
||||
}
|
||||
|
||||
var curPw = _pendingWrites.get();
|
||||
var curPwMap = curPw.pendingWrites();
|
||||
|
||||
for (var action : writes) {
|
||||
var key = action.key();
|
||||
switch (action) {
|
||||
case TxRecord.TxObjectRecordWrite<?> write -> {
|
||||
// Log.tracev("Flushing object {0}", write.key());
|
||||
var wrapper = new JDataVersionedWrapperImpl(write.data(), oursId);
|
||||
curPwMap = curPwMap.plus(key, new PendingWrite(wrapper, oursId));
|
||||
curBundle.commit(wrapper);
|
||||
}
|
||||
case TxRecord.TxObjectRecordDeleted deleted -> {
|
||||
// Log.tracev("Deleting object {0}", deleted.key());
|
||||
curPwMap = curPwMap.plus(key, new PendingDelete(key, oursId));
|
||||
curBundle.delete(key);
|
||||
boolean wait = false;
|
||||
while (true) {
|
||||
if (wait) {
|
||||
synchronized (_flushWaitSynchronizer) {
|
||||
long started = System.currentTimeMillis();
|
||||
while (currentSize > sizeLimit) {
|
||||
try {
|
||||
_flushWaitSynchronizer.wait();
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
long waited = System.currentTimeMillis() - started;
|
||||
_waitedTotal.addAndGet(waited);
|
||||
if (Log.isTraceEnabled())
|
||||
Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms");
|
||||
wait = false;
|
||||
}
|
||||
}
|
||||
// Now, make the changes visible to new iterators
|
||||
var newCurPw = new PendingWriteData(
|
||||
curPwMap,
|
||||
curPw.lastFlushedId(),
|
||||
oursId
|
||||
);
|
||||
|
||||
_pendingWrites.compareAndSet(curPw, newCurPw);
|
||||
synchronized (_pendingBundles) {
|
||||
synchronized (_flushWaitSynchronizer) {
|
||||
if (currentSize > sizeLimit) {
|
||||
if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
|
||||
var target = _pendingBundles.poll();
|
||||
|
||||
_pendingBundle = curBundle;
|
||||
_newBundleCondition.signalAll();
|
||||
long diff = -target.calculateTotalSize();
|
||||
while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
|
||||
var toCompress = _pendingBundles.poll();
|
||||
diff -= toCompress.calculateTotalSize();
|
||||
target.compress(toCompress);
|
||||
}
|
||||
diff += target.calculateTotalSize();
|
||||
currentSize += diff;
|
||||
_pendingBundles.addFirst(target);
|
||||
}
|
||||
}
|
||||
|
||||
_curSize += (curBundle.size() - oldSize);
|
||||
if (currentSize > sizeLimit) {
|
||||
wait = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (shouldWake && _curSize < sizeLimit) {
|
||||
_flushCondition.signal();
|
||||
TxBundle bundle;
|
||||
synchronized (_notFlushedBundles) {
|
||||
bundle = new TxBundle(_lastCommittedId.incrementAndGet());
|
||||
_pendingBundles.addLast(bundle);
|
||||
_notFlushedBundles.put(bundle.getId(), bundle);
|
||||
}
|
||||
|
||||
for (var action : writes) {
|
||||
switch (action) {
|
||||
case TxRecord.TxObjectRecordWrite<?> write -> {
|
||||
Log.trace("Flushing object " + write.key());
|
||||
bundle.commit(new JDataVersionedWrapperImpl(write.data(), bundle.getId()));
|
||||
}
|
||||
case TxRecord.TxObjectRecordDeleted deleted -> {
|
||||
Log.trace("Deleting object " + deleted.key());
|
||||
bundle.delete(deleted.key());
|
||||
}
|
||||
default -> {
|
||||
throw new TxCommitException("Unexpected value: " + action.key());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
while (true) {
|
||||
var curPw = _pendingWrites.get();
|
||||
var curPwMap = curPw.pendingWrites();
|
||||
for (var e : ((TxBundle) bundle)._entries.values()) {
|
||||
switch (e) {
|
||||
case TxBundle.CommittedEntry c -> {
|
||||
curPwMap = curPwMap.plus(c.key(), new PendingWrite(c.data, bundle.getId()));
|
||||
}
|
||||
case TxBundle.DeletedEntry d -> {
|
||||
curPwMap = curPwMap.plus(d.key(), new PendingDelete(d.key, bundle.getId()));
|
||||
}
|
||||
default -> throw new IllegalStateException("Unexpected value: " + e);
|
||||
}
|
||||
}
|
||||
// Now, make the changes visible to new iterators
|
||||
var newCurPw = new PendingWriteData(
|
||||
curPwMap,
|
||||
curPw.lastFlushedId(),
|
||||
bundle.getId()
|
||||
);
|
||||
|
||||
if (!_pendingWrites.compareAndSet(curPw, newCurPw))
|
||||
continue;
|
||||
|
||||
((TxBundle) bundle).setReady();
|
||||
if (_pendingBundles.peek() == bundle)
|
||||
_pendingBundles.notify();
|
||||
synchronized (_flushWaitSynchronizer) {
|
||||
currentSize += ((TxBundle) bundle).calculateTotalSize();
|
||||
}
|
||||
|
||||
return bundle.getId();
|
||||
}
|
||||
}
|
||||
|
||||
return oursId;
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException(e);
|
||||
} finally {
|
||||
_pendingBundleLock.unlock();
|
||||
}
|
||||
}
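commitBundle publishes the updated pending-write map with a compare-and-set on an immutable TreePMap, retrying if another committer won the race. A self-contained sketch of that copy-on-write publication pattern (simplified key/value types; assumes the pcollections library already used in the diff):

    import org.pcollections.TreePMap;
    import java.util.concurrent.atomic.AtomicReference;

    // Sketch: copy-on-write publication of a pending-writes map. Readers that
    // grabbed the old map keep a consistent snapshot; new readers see the update.
    final class CasPublishSketch {
        private final AtomicReference<TreePMap<String, Long>> pending =
                new AtomicReference<>(TreePMap.empty());

        void publish(String key, long version) {
            while (true) {
                TreePMap<String, Long> cur = pending.get();
                TreePMap<String, Long> next = cur.plus(key, version); // cur stays intact
                if (pending.compareAndSet(cur, next))
                    return; // a lost race simply retries against the newer map
            }
        }
    }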

    public void asyncFence(long bundleId, Runnable fn) {
        verifyReady();
        if (bundleId < 0) throw new IllegalArgumentException("bundleId should be >=0!");
        if (_lastFlushedId.get() >= bundleId) {
        if (_lastWrittenId.get() >= bundleId) {
            fn.run();
            return;
        }
        _pendingBundleLock.lock();
        try {
            if (_lastFlushedId.get() >= bundleId) {
        synchronized (_notFlushedBundles) {
            if (_lastWrittenId.get() >= bundleId) {
                fn.run();
                return;
            }
            var pendingBundle = _pendingBundle;
            if (pendingBundle == null) {
                fn.run();
                return;
            }
            pendingBundle.addCallback(fn);
        } finally {
            _pendingBundleLock.unlock();
            _notFlushedBundles.get(bundleId).addCallback(fn);
        }
    }
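asyncFence runs its callback once the given bundle id has been flushed, or immediately if it already has. A plausible caller-side pairing with the public commitBundle variant from this diff (hedged sketch; the `store` parameter stands in for the surrounding WritebackObjectPersistentStore instance):

    import java.util.Collection;
    import java.util.concurrent.CountDownLatch;

    // Sketch: commit a bundle, then block until it is durable.
    final class CommitAndWaitSketch {
        static void commitAndWait(WritebackObjectPersistentStore store,
                                  Collection<TxRecord.TxObjectRecord<?>> records)
                throws InterruptedException {
            long bundleId = store.commitBundle(records);
            CountDownLatch flushed = new CountDownLatch(1);
            store.asyncFence(bundleId, flushed::countDown); // fires after flush
            flushed.await();
        }
    }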

@@ -321,8 +350,16 @@ public class WritebackObjectPersistentStore {
        private final long txId = finalPw.lastCommittedId();

        @Override
        public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
            return ListUtils.prepend(new NavigableMapKvIterator<>(_pendingWrites, start, key), _cache.getIterator(start, key));
        public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
            return new TombstoneMergingKvIterator<>("writeback-ps", start, key,
                    (tS, tK) -> new MappingKvIterator<>(
                            new NavigableMapKvIterator<>(_pendingWrites, tS, tK),
                            e -> switch (e) {
                                case PendingWrite pw -> new Data<>(pw.data());
                                case PendingDelete d -> new Tombstone<>();
                                default -> throw new IllegalStateException("Unexpected value: " + e);
                            }),
                    (tS, tK) -> new MappingKvIterator<>(_cache.getIterator(tS, tK), Data::new));
        }
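The new getIterator layers pending writes over the cache and converts pending deletes into tombstones, so an in-flight delete masks, rather than exposes, a stale cached value. The idea in miniature (self-contained toy types, not the repository's iterator classes):

    import java.util.SortedMap;
    import java.util.TreeMap;

    // Sketch: pending entries overlay the cache; a tombstone hides the key.
    final class TombstoneMergeSketch {
        sealed interface Entry permits Data, Tombstone {}
        record Data(String value) implements Entry {}
        record Tombstone() implements Entry {}

        static SortedMap<String, String> merged(SortedMap<String, Entry> pending,
                                                SortedMap<String, String> cache) {
            var out = new TreeMap<>(cache);
            pending.forEach((k, e) -> {
                switch (e) {
                    case Data d -> out.put(k, d.value());  // pending write wins
                    case Tombstone t -> out.remove(k);     // pending delete hides cache
                }
            });
            return out;
        }
    }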

        @Nonnull
@@ -331,7 +368,7 @@ public class WritebackObjectPersistentStore {
            var cached = _pendingWrites.get(name);
            if (cached != null) {
                return switch (cached) {
                    case PendingWrite c -> Optional.of(c.value());
                    case PendingWrite c -> Optional.of(c.data());
                    case PendingDelete d -> {
                        yield Optional.empty();
                    }
@@ -359,59 +396,72 @@ public class WritebackObjectPersistentStore {
        }
    }
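The read path above consults the pending-write map before the cache: a pending write short-circuits with its value, a pending delete short-circuits with empty, and only truly unknown keys fall through to the backing read. A compact sketch of that decision (illustrative types, not the repository's):

    import java.util.Map;
    import java.util.Optional;
    import java.util.function.Function;

    // Sketch: pending entries are authoritative; the backing read only runs
    // when the key has no in-flight write or delete.
    final class ReadPathSketch {
        sealed interface Pending permits Write, Delete {}
        record Write(String value) implements Pending {}
        record Delete() implements Pending {}

        static Optional<String> read(Map<String, Pending> pending,
                                     Function<String, Optional<String>> backing,
                                     String key) {
            return switch (pending.get(key)) {
                case Write w -> Optional.of(w.value());
                case Delete d -> Optional.empty();
                case null -> backing.apply(key);
            };
        }
    }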

    private record PendingWriteData(TreePMap<JObjectKey, PendingWriteEntry> pendingWrites,
                                    long lastFlushedId,
                                    long lastCommittedId) {
    public interface VerboseReadResult {
    }

    private static class TxBundle {
        private final HashMap<JObjectKey, BundleEntry> _entries = new HashMap<>();
        private final LinkedHashMap<JObjectKey, BundleEntry> _entries = new LinkedHashMap<>();
        private final ArrayList<Runnable> _callbacks = new ArrayList<>();
        private int _size = 0;
        private long _txId;

        ArrayList<Runnable> callbacks() {
            return _callbacks;
        }
        private volatile boolean _ready = false;
        private long _size = -1;
        private boolean _wasCommitted = false;

        private TxBundle(long txId) {
            _txId = txId;
        }

        public void setId(long id) {
            _txId = id;
        public long getId() {
            return _txId;
        }

        public long id() {
            return _txId;

        public void setReady() {
            _ready = true;
        }

        public void addCallback(Runnable callback) {
            _callbacks.add(callback);
        }

        public int size() {
            return _size;
        }

        private void putEntry(BundleEntry entry) {
            var old = _entries.put(entry.key(), entry);
            if (old != null) {
                _size -= old.size();
            synchronized (_callbacks) {
                if (_wasCommitted) throw new IllegalStateException();
                _callbacks.add(callback);
            }
        }

        public List<Runnable> setCommitted() {
            synchronized (_callbacks) {
                _wasCommitted = true;
                return Collections.unmodifiableList(_callbacks);
            }
            _size += entry.size();
        }

        public void commit(JDataVersionedWrapper obj) {
            putEntry(new CommittedEntry(obj.data().key(), obj, obj.data().estimateSize()));
            synchronized (_entries) {
                _entries.put(obj.data().key(), new CommittedEntry(obj.data().key(), obj, obj.data().estimateSize()));
            }
        }

        public void delete(JObjectKey obj) {
            putEntry(new DeletedEntry(obj));
            synchronized (_entries) {
                _entries.put(obj, new DeletedEntry(obj));
            }
        }

        private sealed interface BundleEntry permits CommittedEntry, DeletedEntry {
        public long calculateTotalSize() {
            if (_size >= 0) return _size;
            _size = _entries.values().stream().mapToInt(BundleEntry::size).sum();
            return _size;
        }

        public void compress(TxBundle other) {
            if (_txId >= other._txId)
                throw new IllegalArgumentException("Compressing an older bundle into newer");

            _txId = other._txId;
            _size = -1;

            _entries.putAll(other._entries);
        }
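compress() folds ready bundles together when the store is over its size limit: entries merge keyed by object, the newest write per key survives, and the merged bundle takes the newer txId. A toy demonstration of why the merge shrinks the write volume (plain maps, illustrative only):

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Sketch: merging bundle b (newer) into bundle a (older). putAll overwrites
    // per key, so repeated writes to the same object collapse into one entry.
    final class CompressSketch {
        public static void main(String[] args) {
            Map<String, String> a = new LinkedHashMap<>();
            a.put("x", "v1");
            a.put("y", "v1");
            Map<String, String> b = new LinkedHashMap<>();
            b.put("x", "v2");
            a.putAll(b); // one flush now covers both bundles
            System.out.println(a); // {x=v2, y=v1} - x keeps its slot, newest value
        }
    }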

        private interface BundleEntry {
            JObjectKey key();

            int size();
@@ -429,4 +479,10 @@ public class WritebackObjectPersistentStore {
        }
    }
}

    public record VerboseReadResultPersisted(Optional<JDataVersionedWrapper> data) implements VerboseReadResult {
    }

    public record VerboseReadResultPending(PendingWriteEntry pending) implements VerboseReadResult {
    }
}

@@ -4,12 +4,12 @@ import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;

import java.util.Optional;

@Singleton
@ApplicationScoped
public class CurrentTransaction implements Transaction {
    @Inject
    TransactionManager transactionManager;
@@ -25,8 +25,8 @@ public class CurrentTransaction implements Transaction {
    }

    @Override
    public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key) {
        return transactionManager.current().get(type, key);
    public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy) {
        return transactionManager.current().get(type, key, strategy);
    }

    @Override
@@ -43,9 +43,4 @@ public class CurrentTransaction implements Transaction {
    public <T extends JData> void put(JData obj) {
        transactionManager.current().put(obj);
    }

    @Override
    public <T extends JData> void putNew(JData obj) {
        transactionManager.current().putNew(obj);
    }
}

@@ -0,0 +1,295 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.snapshot.SnapshotManager;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.enterprise.inject.Instance;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;

// Manages all access to com.usatiuk.objects.JData objects.
// In particular, it serves as a source of truth for what is committed to the backing storage.
// All data goes through it; it is responsible for transaction atomicity.
// TODO: persistent tx id
@ApplicationScoped
public class JObjectManager {
    private final List<PreCommitTxHook> _preCommitTxHooks;
    @Inject
    SnapshotManager snapshotManager;
    @Inject
    TransactionFactory transactionFactory;
    @Inject
    LockManager lockManager;
    private boolean _ready = false;

    JObjectManager(Instance<PreCommitTxHook> preCommitTxHooks) {
        _preCommitTxHooks = List.copyOf(preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList());
        Log.debugv("Pre-commit hooks: {0}", String.join("->", _preCommitTxHooks.stream().map(Objects::toString).toList()));
    }

    private void verifyReady() {
        if (!_ready) throw new IllegalStateException("Wrong service order!");
    }

    void init(@Observes @Priority(200) StartupEvent event) {
        _ready = true;
    }

    public TransactionPrivate createTransaction() {
        verifyReady();
        var tx = transactionFactory.createTransaction();
        Log.tracev("Created transaction with snapshotId={0}", tx.snapshot().id());
        return tx;
    }

    public Pair<Collection<Runnable>, TransactionHandle> commit(TransactionPrivate tx) {
        verifyReady();
        var writes = new LinkedHashMap<JObjectKey, TxRecord.TxObjectRecord<?>>();
        var dependenciesLocked = new LinkedHashMap<JObjectKey, Optional<JDataVersionedWrapper>>();
        Snapshot<JObjectKey, JDataVersionedWrapper> commitSnapshot = null;
        Map<JObjectKey, TransactionObject<?>> readSet;
        var toUnlock = new ArrayList<AutoCloseableNoThrow>();

        try {
            try {
                long pendingCount = 0;
                Map<PreCommitTxHook, Map<JObjectKey, TxRecord.TxObjectRecord<?>>> pendingWrites = Map.ofEntries(
                        _preCommitTxHooks.stream().map(p -> Pair.of(p, new HashMap<>())).toArray(Pair[]::new)
                );
                Map<PreCommitTxHook, Map<JObjectKey, TxRecord.TxObjectRecord<?>>> lastWrites = Map.ofEntries(
                        _preCommitTxHooks.stream().map(p -> Pair.of(p, new HashMap<>())).toArray(Pair[]::new)
                );

                for (var n : tx.drainNewWrites()) {
                    for (var hookPut : _preCommitTxHooks) {
                        pendingWrites.get(hookPut).put(n.key(), n);
                        pendingCount++;
                    }
                    writes.put(n.key(), n);
                }

                // Run hooks for all objects.
                // Every hook should see every change made to every object, yet the object's evolution
                // should be consistent from the viewpoint of each individual hook.
                // For example, when a hook makes changes to an object, and another hook changes the object before/after it,
                // on the next iteration the first hook should receive the version of the object it had created
                // as the "old" version, and the new version with all the changes after it.
                do {
                    for (var hook : _preCommitTxHooks) {
                        var lastCurHookSeen = lastWrites.get(hook);
                        Function<JObjectKey, JData> getPrev =
                                key -> switch (lastCurHookSeen.get(key)) {
                                    case TxRecord.TxObjectRecordWrite<?> write -> write.data();
                                    case TxRecord.TxObjectRecordDeleted deleted -> null;
                                    case null -> tx.getFromSource(JData.class, key).orElse(null);
                                    default -> {
                                        throw new TxCommitException("Unexpected value: " + writes.get(key));
                                    }
                                };

                        var curIteration = pendingWrites.get(hook);

                        // Log.trace("Commit iteration with " + curIteration.size() + " records for hook " + hook.getClass());

                        for (var entry : curIteration.entrySet()) {
                            // Log.trace("Running pre-commit hook " + hook.getClass() + " for " + entry.getKey());
                            var oldObj = getPrev.apply(entry.getKey());
                            lastCurHookSeen.put(entry.getKey(), entry.getValue());
                            switch (entry.getValue()) {
                                case TxRecord.TxObjectRecordWrite<?> write -> {
                                    if (oldObj == null) {
                                        hook.onCreate(write.key(), write.data());
                                    } else {
                                        hook.onChange(write.key(), oldObj, write.data());
                                    }
                                }
                                case TxRecord.TxObjectRecordDeleted deleted -> {
                                    hook.onDelete(deleted.key(), oldObj);
                                }
                                default -> throw new TxCommitException("Unexpected value: " + entry);
                            }
                        }

                        pendingCount -= curIteration.size();
                        curIteration.clear();

                        for (var n : tx.drainNewWrites()) {
                            for (var hookPut : _preCommitTxHooks) {
                                if (hookPut == hook) {
                                    lastCurHookSeen.put(n.key(), n);
                                    continue;
                                }
                                var before = pendingWrites.get(hookPut).put(n.key(), n);
                                if (before == null)
                                    pendingCount++;
                            }
                            writes.put(n.key(), n);
                        }
                    }
                } while (pendingCount > 0);
            } catch (Throwable e) {
                for (var read : tx.reads().entrySet()) {
                    if (read.getValue() instanceof TransactionObjectLocked<?> locked) {
                        toUnlock.add(locked.lock());
                    }
                }
                throw e;
            }

            readSet = tx.reads();

            if (!writes.isEmpty()) {
                Stream.concat(readSet.keySet().stream(), writes.keySet().stream())
                        .sorted(Comparator.comparing(JObjectKey::toString))
                        .forEach(k -> {
                            var lock = lockManager.lockObject(k);
                            toUnlock.add(lock);
                        });

                commitSnapshot = snapshotManager.createSnapshot();
            }

            for (var read : readSet.entrySet()) {
                if (read.getValue() instanceof TransactionObjectLocked<?> locked) {
                    toUnlock.add(locked.lock());
                }
            }

            if (writes.isEmpty()) {
                Log.trace("Committing transaction - no changes");

                return Pair.of(
                        Stream.concat(
                                tx.getOnCommit().stream(),
                                tx.getOnFlush().stream()
                        ).toList(),
                        new TransactionHandle() {
                            @Override
                            public void onFlush(Runnable runnable) {
                                runnable.run();
                            }
                        });
            }

            Log.trace("Committing transaction start");
            var snapshotId = tx.snapshot().id();

            if (snapshotId != commitSnapshot.id()) {
                for (var read : readSet.entrySet()) {
                    dependenciesLocked.put(read.getKey(), commitSnapshot.readObject(read.getKey()));
                    var dep = dependenciesLocked.get(read.getKey());

                    if (dep.isEmpty() != read.getValue().data().isEmpty()) {
                        Log.trace("Checking read dependency " + read.getKey() + " - not found");
                        throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
                    }

                    if (dep.isEmpty()) {
                        // TODO: Every write gets a dependency due to hooks
                        continue;
                        // assert false;
                        // throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
                    }

                    if (dep.get().version() > snapshotId) {
                        Log.trace("Checking dependency " + read.getKey() + " - newer than");
                        throw new TxCommitException("Serialization hazard: " + dep.get().data().key() + " " + dep.get().version() + " vs " + snapshotId);
                    }

                    Log.trace("Checking dependency " + read.getKey() + " - ok with read");
                }
            } else {
                Log.tracev("Skipped dependency checks: no changes");
            }

            boolean same = snapshotId == commitSnapshot.id();

            var addFlushCallback = snapshotManager.commitTx(
                    writes.values().stream()
                            .filter(r -> {
                                if (!same)
                                    if (r instanceof TxRecord.TxObjectRecordWrite<?>(JData data)) {
                                        var dep = dependenciesLocked.get(data.key());
                                        if (dep.isPresent() && dep.get().version() > snapshotId) {
                                            Log.trace("Skipping write " + data.key() + " - dependency " + dep.get().version() + " vs " + snapshotId);
                                            return false;
                                        }
                                    }
                                return true;
                            }).toList());

            for (var callback : tx.getOnFlush()) {
                addFlushCallback.accept(callback);
            }

            return Pair.of(
                    List.copyOf(tx.getOnCommit()),
                    new TransactionHandle() {
                        @Override
                        public void onFlush(Runnable runnable) {
                            addFlushCallback.accept(runnable);
                        }
                    });
        } catch (Throwable t) {
            Log.trace("Error when committing transaction", t);
            throw new TxCommitException(t.getMessage(), t);
        } finally {
            for (var unlock : toUnlock) {
                unlock.close();
            }
            if (commitSnapshot != null)
                commitSnapshot.close();
            tx.close();
        }
    }
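The dependency check above is classic optimistic concurrency control: after taking the commit snapshot, every object the transaction read must still be at a version no newer than the transaction's starting snapshot, otherwise the commit aborts. Reduced to its core (hypothetical helper, not the repository's API):

    import java.util.function.ToLongFunction;

    // Sketch: abort if any read object was committed past our snapshot.
    final class OccValidateSketch {
        static void validateReads(long snapshotId,
                                  Iterable<String> readKeys,
                                  ToLongFunction<String> committedVersion) {
            for (String key : readKeys) {
                long v = committedVersion.applyAsLong(key);
                if (v > snapshotId)
                    throw new IllegalStateException(
                            "Serialization hazard: " + key + " at " + v + " vs snapshot " + snapshotId);
            }
        }
    }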

    public void rollback(TransactionPrivate tx) {
        verifyReady();
        tx.reads().forEach((key, value) -> {
            if (value instanceof TransactionObjectLocked<?> locked) {
                locked.lock().close();
            }
        });
        tx.close();
    }

//    private class TransactionObjectSourceImpl implements TransactionObjectSource {
//        private final long _txId;
//
//        private TransactionObjectSourceImpl(long txId) {
//            _txId = txId;
//        }
//
//        @Override
//        public <T extends JData> TransactionObject<T> get(Class<T> type, JObjectKey key) {
//            var got = getObj(type, key);
//            if (got.data().isPresent() && got.data().get().version() > _txId) {
//                throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
//            }
//            return got;
//        }
//
//        @Override
//        public <T extends JData> TransactionObject<T> getWriteLocked(Class<T> type, JObjectKey key) {
//            var got = getObjLock(type, key);
//            if (got.data().isPresent() && got.data().get().version() > _txId) {
//                got.lock().close();
//                throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
//            }
//            return got;
//        }
//    }
}
@@ -0,0 +1,23 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import com.usatiuk.dhfs.utils.DataLocker;
import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;
import jakarta.enterprise.context.ApplicationScoped;

@ApplicationScoped
public class LockManager {
    private final DataLocker _objLocker = new DataLocker();

    @Nonnull
    public AutoCloseableNoThrow lockObject(JObjectKey key) {
        return _objLocker.lock(key);
    }

    @Nullable
    public AutoCloseableNoThrow tryLockObject(JObjectKey key) {
        return _objLocker.tryLock(key);
    }
}
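Because the lock handles are AutoCloseableNoThrow, callers can scope them with try-with-resources, and a null from tryLockObject signals that the key is busy. A hedged usage sketch (this assumes AutoCloseableNoThrow extends AutoCloseable, as its use with close() elsewhere in the diff suggests):

    // Sketch: exclusive section over one object key; try-with-resources
    // guarantees the unlock even if the critical section throws.
    final class LockScopeSketch {
        static void withObjectLock(LockManager lockManager, JObjectKey key, Runnable critical) {
            try (var lock = lockManager.lockObject(key)) { // assumed AutoCloseable
                critical.run();
            }
        }
    }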
@@ -0,0 +1,6 @@
package com.usatiuk.objects.transaction;

public enum LockingStrategy {
    OPTIMISTIC, // Optimistic write, does not block other possible writers/readers
    WRITE, // Write lock, blocks all other writers
}
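Callers pick the strategy per read: the new default get() keeps existing call sites optimistic, while contended writers can opt into WRITE to avoid commit-time serialization aborts. A sketch against the Transaction interface shown below (the type parameter stands in for any JData implementation):

    // Sketch: choose locking per read.
    final class LockingChoiceSketch {
        static <T extends JData> void readBothWays(Transaction tx, Class<T> type, JObjectKey key) {
            var optimistic = tx.get(type, key);                      // defaults to OPTIMISTIC
            var locked = tx.get(type, key, LockingStrategy.WRITE);   // holds the write lock
        }
    }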
@@ -11,13 +11,16 @@ import java.util.Optional;
public interface Transaction extends TransactionHandle {
    void onCommit(Runnable runnable);

    <T extends JData> Optional<T> get(Class<T> type, JObjectKey key);
    <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy);

    <T extends JData> void put(JData obj);
    <T extends JData> void putNew(JData obj);

    void delete(JObjectKey key);

    default <T extends JData> Optional<T> get(Class<T> type, JObjectKey key) {
        return get(type, key, LockingStrategy.OPTIMISTIC);
    }

    CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key);

    default CloseableKvIterator<JObjectKey, JData> getIterator(JObjectKey key) {