Commits (1)

SHA1       Message          Date
7ba219f35e type iterators?  2025-03-13 16:34:53 +01:00
553 changed files with 22738 additions and 13080 deletions


@@ -20,21 +20,26 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: "recursive"
- name: Install sudo for ACT
run: apt-get update && apt-get install -y sudo
if: env.ACT=='true'
- name: Install FUSE
run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
- name: Install fuse and maven
run: sudo apt-get update && sudo apt-get install -y libfuse2
- name: User allow other for fuse
run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf
- name: Download maven
run: |
cd "$HOME"
mkdir maven-bin
curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
echo "$HOME"/maven-bin/bin >> $GITHUB_PATH
- name: Dump fuse.conf
run: cat /etc/fuse.conf
- name: Maven info
run: |
echo $GITHUB_PATH
echo $PATH
mvn -v
- name: Set up JDK 21
uses: actions/setup-java@v4
@@ -43,11 +48,8 @@ jobs:
distribution: "zulu"
cache: maven
- name: Build LazyFS
run: cd thirdparty/lazyfs/ && ./build.sh
- name: Test with Maven
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify
run: cd dhfs-parent && mvn --batch-mode --update-snapshots package verify
# - name: Build with Maven
# run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG
@@ -55,7 +57,7 @@ jobs:
- uses: actions/upload-artifact@v4
with:
name: DHFS Server Package
path: dhfs-parent/dhfs-fuse/target/quarkus-app
path: dhfs-parent/server/target/quarkus-app
- uses: actions/upload-artifact@v4
if: ${{ always() }}
@@ -87,6 +89,102 @@ jobs:
name: Webui
path: webui/dist
build-native-libs:
strategy:
matrix:
include:
- os: ubuntu-latest
cross: "linux/amd64"
- os: ubuntu-latest
cross: "linux/arm64"
- os: macos-latest
runs-on: ${{ matrix.os }}
env:
DO_LOCAL_BUILD: ${{ matrix.os == 'macos-latest' }}
DOCKER_PLATFORM: ${{ matrix.cross || 'NATIVE' }}
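# Matrix summary (as inferred from the steps below): macOS builds the native
# library locally (DO_LOCAL_BUILD), while Linux targets build inside a Docker
# builder image, with QEMU enabling the linux/arm64 cross build.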
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set SANITIZED_DOCKER_PLATFORM
run: echo "SANITIZED_DOCKER_PLATFORM=$(echo $DOCKER_PLATFORM | tr / _ )" >> $GITHUB_ENV
- name: Set DOCKER_BUILDER_IMAGE
run: echo "DOCKER_BUILDER_IMAGE=dhfs_lib_builder-${{matrix.os}}-$SANITIZED_DOCKER_PLATFORM" >> $GITHUB_ENV
- name: Build config
run: |
echo DO_LOCAL_BUILD: $DO_LOCAL_BUILD
echo DOCKER_PLATFORM: $DOCKER_PLATFORM
echo SANITIZED_DOCKER_PLATFORM: $SANITIZED_DOCKER_PLATFORM
echo DOCKER_BUILDER_IMAGE: $DOCKER_BUILDER_IMAGE
- name: Set up JDK 21
if: ${{ env.DO_LOCAL_BUILD == 'TRUE' }}
uses: actions/setup-java@v4
with:
java-version: "21"
distribution: "zulu"
cache: maven
- name: Set up Docker Buildx
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/setup-buildx-action@v3
- name: Set up QEMU
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/setup-qemu-action@v3
- name: Build Docker builder image
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/build-push-action@v5
with:
context: ./libdhfs_support/builder
file: ./libdhfs_support/builder/Dockerfile
push: false
platforms: ${{ env.DOCKER_PLATFORM }}
tags: ${{ env.DOCKER_BUILDER_IMAGE }}
cache-from: type=gha,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
cache-to: type=gha,mode=max,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
load: true
- name: Build the library
run: |
CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" libdhfs_support/builder/cross-build.sh both build "$(pwd)/result"
- name: Upload build
uses: actions/upload-artifact@v4
with:
name: NativeLib-${{ matrix.os }}-${{ env.SANITIZED_DOCKER_PLATFORM }}
path: result
merge-native-libs:
runs-on: ubuntu-latest
needs: [build-native-libs]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: downloaded-libs
- name: Merge all
run: rsync -av downloaded-libs/NativeLib*/* result/
- name: Check that libs exist
run: |
test -f "result/Linux-x86_64/libdhfs_support.so" || exit 1
- name: Upload
uses: actions/upload-artifact@v4
with:
name: NativeLibs
path: result
publish-docker:
runs-on: ubuntu-latest
permissions:
@@ -96,7 +194,7 @@ jobs:
# with sigstore/fulcio when running outside of PRs.
id-token: write
needs: [build-webui, build-dhfs]
needs: [build-webui, merge-native-libs, build-dhfs]
steps:
- name: Checkout repository
@@ -114,6 +212,12 @@ jobs:
name: Webui
path: webui-dist-downloaded
- name: Download native libs
uses: actions/download-artifact@v4
with:
name: NativeLibs
path: dhfs-native-downloaded
- name: Show all the files
run: find .
@@ -189,7 +293,7 @@ jobs:
# with sigstore/fulcio when running outside of PRs.
id-token: write
needs: [build-webui, build-dhfs]
needs: [build-webui, merge-native-libs, build-dhfs]
steps:
- name: Checkout repository
@@ -205,6 +309,11 @@ jobs:
name: Webui
path: webui-dist-downloaded
- uses: actions/download-artifact@v4
with:
name: NativeLibs
path: dhfs-native-downloaded
- name: Show all the files
run: find .
@@ -212,11 +321,14 @@ jobs:
run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app
- name: Copy DHFS
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"
- name: Copy Webui
run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"
- name: Copy Native Libs
run: cp -r ./dhfs-native-downloaded "run-wrapper-out/dhfs/app/NativeLibs"
- name: Copy run wrapper
run: cp -r ./run-wrapper/* "run-wrapper-out/dhfs/app/"

.gitmodules (vendored)

@@ -1,3 +0,0 @@
[submodule "thirdparty/lazyfs/lazyfs"]
path = thirdparty/lazyfs/lazyfs
url = git@github.com:dsrhaslab/lazyfs.git


@@ -9,6 +9,8 @@ COPY ./dhfs-package-downloaded/*.jar .
COPY ./dhfs-package-downloaded/app .
COPY ./dhfs-package-downloaded/quarkus .
WORKDIR /usr/src/app/native-libs
COPY ./dhfs-native-downloaded/. .
WORKDIR /usr/src/app/webui
COPY ./webui-dist-downloaded/. .


@@ -14,9 +14,6 @@ Syncthing and allowing you to stream your files like Google Drive File Stream
This is a simple wrapper around the jar/web ui distribution that allows you to run/stop
the DHFS server in the background, and update itself (hopefully!)
## How to use it?
## How to use it and how it works?
Unpack the run-wrapper and run the `run` script. The filesystem should be mounted to the `fuse` folder in the run-wrapper root directory.
Then, a web interface will be available at `localhost:8080`, which can be used to connect with other peers.
TODO 😁
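For illustration, a minimal shell sketch of the steps above; the archive name is an assumption, not part of the distribution:

# unpack the run-wrapper distribution (archive name assumed for illustration)
tar xf run-wrapper.tar.gz
cd run-wrapper
./run
# the filesystem is now mounted under ./fuse;
# the web interface for connecting peers is at http://localhost:8080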


@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main" />
<module name="server" />
<option name="VM_PARAMETERS" value="--add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>


@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main" />
<module name="server" />
<option name="VM_PARAMETERS" value="--add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>


@@ -0,0 +1,60 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap-deployment</artifactId>
<name>Autoprotomap - Deployment</name>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc-deployment</artifactId>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5-internal</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc-deployment</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<execution>
<id>default-compile</id>
<configuration>
<annotationProcessorPaths>
<path>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-processor</artifactId>
<version>${quarkus.platform.version}</version>
</path>
</annotationProcessorPaths>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>


@@ -0,0 +1,78 @@
package com.usatiuk.autoprotomap.deployment;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.arc.deployment.GeneratedBeanBuildItem;
import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.ApplicationIndexBuildItem;
import io.quarkus.gizmo.ClassCreator;
import io.quarkus.gizmo.SignatureBuilder;
import jakarta.inject.Singleton;
import org.jboss.jandex.ClassType;
import org.jboss.jandex.Type;
class AutoprotomapProcessor {
@BuildStep
ProtoIndexBuildItem index(ApplicationIndexBuildItem jandex) {
var ret = new ProtoIndexBuildItem();
var annot = jandex.getIndex().getAnnotations(ProtoMirror.class);
for (var a : annot) {
var protoTarget = jandex.getIndex().getClassByName(((ClassType) a.value().value()).name());
// if (!messageImplementors.contains(protoTarget))
// throw new IllegalArgumentException("Expected " + protoTarget + " to be a proto message");
System.out.println("Found: " + a.name().toString() + " at " + protoTarget.name().toString() + " of " + a.target().asClass().name().toString());
ret.protoMsgToObj.put(protoTarget, a.target().asClass());
}
return ret;
}
@BuildStep
void generateProtoSerializer(ApplicationIndexBuildItem jandex,
ProtoIndexBuildItem protoIndex,
BuildProducer<GeneratedBeanBuildItem> generatedClasses) {
try {
for (var o : protoIndex.protoMsgToObj.entrySet()) {
System.out.println("Generating " + o.getKey().toString() + " -> " + o.getValue().toString());
var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedClasses);
var msgType = io.quarkus.gizmo.Type.classType(o.getKey().name());
var objType = io.quarkus.gizmo.Type.classType(o.getValue().name());
var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
msgType, objType);
var msgJType = Type.create(o.getKey().name(), Type.Kind.CLASS);
var objJType = Type.create(o.getValue().name(), Type.Kind.CLASS);
try (ClassCreator classCreator = ClassCreator.builder()
.className("com.usatiuk.autoprotomap.generated.for" + o.getKey().simpleName())
.signature(SignatureBuilder.forClass().addInterface(type))
.classOutput(gizmoAdapter)
.setFinal(true)
.build()) {
classCreator.addAnnotation(Singleton.class);
var generator = new ProtoSerializerGenerator(
jandex.getIndex(),
protoIndex,
classCreator,
msgJType,
objJType
);
generator.generate();
}
}
} catch (Throwable e) {
StringBuilder sb = new StringBuilder();
sb.append(e + "\n");
for (var el : e.getStackTrace()) {
sb.append(el.toString() + "\n");
}
System.out.println(sb);
}
}
}


@@ -0,0 +1,18 @@
package com.usatiuk.autoprotomap.deployment;
public class Constants {
public static final String FIELD_PREFIX = "_";
public static String capitalize(String str) {
return str.substring(0, 1).toUpperCase() + str.substring(1);
}
public static String stripPrefix(String str, String prefix) {
if (str.startsWith(prefix)) {
return str.substring(prefix.length());
}
return str;
}
}


@@ -0,0 +1,6 @@
package com.usatiuk.autoprotomap.deployment;
@FunctionalInterface
public interface Effect {
void apply();
}


@@ -0,0 +1,10 @@
package com.usatiuk.autoprotomap.deployment;
import io.quarkus.builder.item.SimpleBuildItem;
import org.apache.commons.collections4.BidiMap;
import org.apache.commons.collections4.bidimap.DualHashBidiMap;
import org.jboss.jandex.ClassInfo;
public final class ProtoIndexBuildItem extends SimpleBuildItem {
BidiMap<ClassInfo, ClassInfo> protoMsgToObj = new DualHashBidiMap<>();
}


@@ -0,0 +1,342 @@
package com.usatiuk.autoprotomap.deployment;
import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.gizmo.*;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.jboss.jandex.Type;
import org.jboss.jandex.*;
import org.objectweb.asm.Opcodes;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.usatiuk.autoprotomap.deployment.Constants.*;
public class ProtoSerializerGenerator {
private final Index index;
private final ProtoIndexBuildItem protoIndex;
private final ClassCreator classCreator;
private final HashSet<Pair<ClassInfo, ClassInfo>> externalSerializers = new HashSet<>();
private final Type topMessageType;
private final Type topObjectType;
public ProtoSerializerGenerator(Index index, ProtoIndexBuildItem protoIndex, ClassCreator classCreator, Type topMessageType, Type topObjectType) {
this.index = index;
this.protoIndex = protoIndex;
this.classCreator = classCreator;
this.topMessageType = topMessageType;
this.topObjectType = topObjectType;
}
private FieldDescriptor getOutsideSerializer(ClassInfo messageClass, ClassInfo objectClass) {
var name = messageClass.name().withoutPackagePrefix() + objectClass.name().withoutPackagePrefix() + "serializer";
var msgType = io.quarkus.gizmo.Type.classType(messageClass.name());
var objType = io.quarkus.gizmo.Type.classType(objectClass.name());
var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
msgType, objType);
var sig = SignatureBuilder.forField().setType(type).build();
var fd = FieldDescriptor.of(classCreator.getClassName(), name, ProtoSerializer.class);
if (externalSerializers.add(Pair.of(messageClass, objectClass))) {
var fc = classCreator.getFieldCreator(fd);
fc.addAnnotation(Inject.class);
fc.setSignature(sig);
fc.setModifiers(Opcodes.ACC_PUBLIC);
}
return fd;
}
private void traverseHierarchy(Index index, ClassInfo klass, Consumer<ClassInfo> visitor) {
var cur = klass;
while (true) {
visitor.accept(cur);
var next = cur.superClassType().name();
if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break;
cur = index.getClassByName(next);
}
}
private ArrayList<FieldInfo> findAllFields(Index index, ClassInfo klass) {
ArrayList<FieldInfo> ret = new ArrayList<>();
traverseHierarchy(index, klass, cur -> {
ret.addAll(cur.fields());
});
return ret;
}
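// generateBuilderUse (below) recursively copies object fields into a protobuf
// builder: public fields are read directly, private ones through their getters,
// and fields whose class maps to another proto message recurse into the
// corresponding nested builder.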
private void generateBuilderUse(BytecodeCreator bytecodeCreator,
ResultHandle builder,
Type messageType, Type objectType,
ResultHandle object) {
var builderType = Type.create(DotName.createComponentized(messageType.name(), "Builder", true), Type.Kind.CLASS);
var objectClass = index.getClassByName(objectType.name().toString());
Function<String, String> getterGetter = objectClass.isRecord()
? Function.identity()
: s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX));
for (var f : findAllFields(index, objectClass)) {
var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX);
Supplier<ResultHandle> get = () -> {
if ((f.flags() & Opcodes.ACC_PUBLIC) != 0)
return bytecodeCreator.readInstanceField(f, object);
else {
var fieldGetter = getterGetter.apply(f.name());
return bytecodeCreator.invokeVirtualMethod(
MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object);
}
};
Effect doSimpleCopy = () -> {
var setter = MethodDescriptor.ofMethod(builderType.name().toString(), "set" + capitalize(consideredFieldName),
builderType.name().toString(), f.type().toString());
var val = get.get();
bytecodeCreator.invokeVirtualMethod(setter, builder, val);
};
switch (f.type().kind()) {
case CLASS -> {
if (f.type().equals(Type.create(String.class)) || f.type().equals(Type.create(ByteString.class))) {
doSimpleCopy.apply();
} else {
var builderGetter = "get" + capitalize(f.name()) + "Builder";
var protoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(f.type().name()));
var nestedBuilderType = Type.create(DotName.createComponentized(protoType.name(), "Builder", true), Type.Kind.CLASS);
var nestedBuilder = bytecodeCreator.invokeVirtualMethod(
MethodDescriptor.ofMethod(builderType.toString(), builderGetter, nestedBuilderType.name().toString()), builder);
var val = get.get();
generateBuilderUse(bytecodeCreator, nestedBuilder, Type.create(protoType.name(), Type.Kind.CLASS), f.type(), val);
}
}
case PRIMITIVE -> {
doSimpleCopy.apply();
}
case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
case PARAMETERIZED_TYPE ->
throw new UnsupportedOperationException("Parametrized types not supported yet");
case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
default -> throw new IllegalStateException("Unexpected type: " + f.type());
}
}
}
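// generateConstructorUse rebuilds an object from a message: it locates the
// all-args constructor and fills every argument from the matching message
// getter, recursing for nested message-mapped types.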
private ResultHandle generateConstructorUse(
BytecodeCreator bytecodeCreator,
ClassCreator classCreator,
Type messageType, Type objectType,
ResultHandle message
) {
var constructor = findAllArgsConstructor(index, index.getClassByName(objectType.name()));
if (constructor == null) {
throw new IllegalStateException("No constructor found for type: " + objectType.name());
}
var argMap = new ResultHandle[constructor.parametersCount()];
for (int i = 0; i < argMap.length; i++) {
var type = constructor.parameterType(i);
var strippedName = stripPrefix(constructor.parameterName(i), FIELD_PREFIX);
IntConsumer doSimpleCopy = (arg) -> {
var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
type.name().toString());
argMap[arg] = bytecodeCreator.invokeVirtualMethod(call, message);
};
switch (type.kind()) {
case CLASS -> {
if (type.equals(Type.create(String.class)) || type.equals(Type.create(ByteString.class))) {
doSimpleCopy.accept(i);
} else {
var nestedProtoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(type.name()));
var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
nestedProtoType.name().toString());
var nested = bytecodeCreator.invokeVirtualMethod(call, message);
argMap[i] = generateConstructorUse(bytecodeCreator, classCreator, Type.create(nestedProtoType.name(), Type.Kind.CLASS), type, nested);
}
}
case PRIMITIVE -> {
doSimpleCopy.accept(i);
}
case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
case PARAMETERIZED_TYPE ->
throw new UnsupportedOperationException("Parametrized types not supported yet");
case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
default -> throw new IllegalStateException("Unexpected type: " + type);
}
}
return bytecodeCreator.newInstance(constructor, argMap);
}
private MethodInfo findAllArgsConstructor(Index index, ClassInfo klass) {
ArrayList<FieldInfo> fields = findAllFields(index, klass);
var fieldCount = fields.size();
var fieldNames = fields.stream().map(f -> stripPrefix(f.name(), FIELD_PREFIX)).sorted().toList();
var fieldNameToType = fields.stream().collect(Collectors.toMap(f -> stripPrefix(f.name(), FIELD_PREFIX), FieldInfo::type));
for (var m : klass.constructors()) {
if (m.parametersCount() != fieldCount) continue;
var parameterNames = m.parameters().stream().map(n -> stripPrefix(n.name(), FIELD_PREFIX)).sorted().toList();
if (!Objects.equals(fieldNames, parameterNames)) continue;
// check every parameter type against the matching field type; note that a
// plain 'continue' inside this loop would only skip the parameter, not reject
// the constructor
boolean typesMatch = true;
for (var p : m.parameters()) {
if (!Objects.equals(fieldNameToType.get(stripPrefix(p.name(), FIELD_PREFIX)), p.type())) { typesMatch = false; break; }
}
if (!typesMatch) continue;
return m;
}
return null;
}
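// generateAbstract covers abstract classes and interfaces: serialize() emits an
// instanceof chain over all concrete subtypes and writes each into the matching
// oneof field of the top message (delegating to an injected external serializer
// when no @ProtoMirror mapping exists); deserialize() dispatches on has*() calls.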
public void generateAbstract() {
var kids = Stream.concat(index.getAllKnownSubclasses(topObjectType.name()).stream(),
index.getAllKnownImplementors(topObjectType.name()).stream())
.filter(k -> !k.isAbstract() && !k.isInterface()).toList();
try (MethodCreator method = classCreator.getMethodCreator("serialize",
Message.class, Object.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);
var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));
var arg = method.getMethodParam(0);
for (var nestedObjClass : kids) {
System.out.println("Generating " + nestedObjClass.name() + " serializer for " + topObjectType.name());
var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
boolean doExternalCall = false;
if (nestedMessageClass == null) {
var msgInfo = index.getClassByName(topMessageType.name());
nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
doExternalCall = true;
}
var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);
var statement = method.ifTrue(method.instanceOf(arg, nestedObjClass.name().toString()));
try (var branch = statement.trueBranch()) {
if (doExternalCall) {
var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
var serialized = branch.invokeInterfaceMethod(
MethodDescriptor.ofMethod(ProtoSerializer.class,
"serialize", Message.class, Object.class),
serializerLoaded, arg);
branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
"set" + capitalize(nestedObjType.name().withoutPackagePrefix()),
builderType.name().toString(), nestedMessageType.name().toString()), builder, serialized);
} else {
var nestedBuilderType = Type.create(DotName.createComponentized(nestedMessageType.name(), "Builder", true), Type.Kind.CLASS);
var nestedBuilder = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
"get" + capitalize(nestedObjType.name().withoutPackagePrefix()) + "Builder",
nestedBuilderType.name().toString()), builder);
generateBuilderUse(branch, nestedBuilder, nestedMessageType, nestedObjType, arg);
}
var result = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
branch.returnValue(result);
}
}
method.throwException(IllegalArgumentException.class, "Unknown object type");
}
try (MethodCreator method = classCreator.getMethodCreator("deserialize",
Object.class, Message.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var arg = method.getMethodParam(0);
for (var nestedObjClass : kids) {
System.out.println("Generating " + nestedObjClass.name() + " deserializer for " + topObjectType.name());
var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
boolean doExternalCall = false;
if (nestedMessageClass == null) {
var msgInfo = index.getClassByName(topMessageType.name());
nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
doExternalCall = true;
}
var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);
var typeCheck = method.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
"has" + capitalize(nestedObjType.name().withoutPackagePrefix()), boolean.class), arg);
var statement = method.ifTrue(typeCheck);
try (var branch = statement.trueBranch()) {
var nestedMessage = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
"get" + capitalize(nestedObjType.name().withoutPackagePrefix()), nestedMessageType.name().toString()), arg);
if (doExternalCall) {
var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
branch.returnValue(branch.invokeInterfaceMethod(
MethodDescriptor.ofMethod(ProtoSerializer.class,
"deserialize", Object.class, Message.class),
serializerLoaded, nestedMessage));
} else {
branch.returnValue(generateConstructorUse(branch, classCreator, nestedMessageType, nestedObjType, nestedMessage));
}
}
}
method.throwException(IllegalArgumentException.class, "Unknown object type");
}
}
public void generate() {
var objInfo = index.getClassByName(topObjectType.name());
if (objInfo.isAbstract() || objInfo.isInterface()) {
generateAbstract();
return;
}
try (MethodCreator method = classCreator.getMethodCreator("serialize",
Message.class, Object.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);
var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));
var arg = method.getMethodParam(0);
generateBuilderUse(method, builder, topMessageType, topObjectType, arg);
var result = method.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
method.returnValue(result);
}
try (MethodCreator method = classCreator.getMethodCreator("deserialize",
Object.class, Message.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var arg = method.getMethodParam(0);
method.returnValue(generateConstructorUse(method, classCreator, topMessageType, topObjectType, arg));
}
}
}


@@ -0,0 +1,22 @@
package com.usatiuk.autoprotomap.test;
import io.quarkus.test.QuarkusDevModeTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
public class AutoprotomapDevModeTest {
// Start hot reload (DevMode) test with your extension loaded
@RegisterExtension
static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
public void writeYourOwnDevModeTest() {
// Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information
Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName());
}
}


@@ -0,0 +1,22 @@
package com.usatiuk.autoprotomap.test;
import io.quarkus.test.QuarkusUnitTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
public class AutoprotomapTest {
// Start unit test with your extension loaded
@RegisterExtension
static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
public void writeYourOwnUnitTest() {
// Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information
Assertions.assertTrue(true, "Add some assertions to " + getClass().getName());
}
}


@@ -0,0 +1,107 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap-integration-tests</artifactId>
<name>Autoprotomap - Integration Tests</name>
<properties>
<skipITs>true</skipITs>
</properties>
<dependencies>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-deployment</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<systemPropertyVariables>
<native.image.path>${project.build.directory}/${project.build.finalName}-runner
</native.image.path>
<java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
<maven.home>${maven.home}</maven.home>
</systemPropertyVariables>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>native-image</id>
<activation>
<property>
<name>native</name>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>${native.surefire.skip}</skipTests>
</configuration>
</plugin>
</plugins>
</build>
<properties>
<skipITs>false</skipITs>
<quarkus.native.enabled>true</quarkus.native.enabled>
</properties>
</profile>
</profiles>
</project>


@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(AbstractProto.class)
public abstract class AbstractObject {
}


@@ -0,0 +1,10 @@
package com.usatiuk.autoprotomap.it;
import lombok.AllArgsConstructor;
import lombok.Getter;
@AllArgsConstructor
@Getter
public class CustomObject extends AbstractObject {
public int testNum = 0;
}


@@ -0,0 +1,17 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Singleton;
@Singleton
public class CustomObjectSerializer implements ProtoSerializer<CustomObjectProto, CustomObject> {
@Override
public CustomObject deserialize(CustomObjectProto message) {
return new CustomObject(2);
}
@Override
public CustomObjectProto serialize(CustomObject object) {
return CustomObjectProto.newBuilder().setTest(1).build();
}
}


@@ -0,0 +1,8 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(InterfaceObjectProto.class)
public interface InterfaceObject {
String key();
}


@@ -0,0 +1,15 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;
@ProtoMirror(NestedObjectProto.class)
@AllArgsConstructor
@Getter
public class NestedObject extends AbstractObject {
public SimpleObject object;
public String _nestedName;
public ByteString _nestedSomeBytes;
}


@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(RecordObjectProto.class)
public record RecordObject(String key) implements InterfaceObject {
}


@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(RecordObject2Proto.class)
public record RecordObject2(String key, int value) implements InterfaceObject {
}


@@ -0,0 +1,15 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;
@ProtoMirror(SimpleObjectProto.class)
@AllArgsConstructor
@Getter
public class SimpleObject extends AbstractObject {
public int numfield = 0;
private String name;
public ByteString someBytes;
}


@@ -0,0 +1,47 @@
syntax = "proto3";
option java_multiple_files = true;
option java_package = "com.usatiuk.autoprotomap.it";
option java_outer_classname = "TestProto";
package autoprotomap.test;
message SimpleObjectProto {
int32 numfield = 1;
string name = 2;
bytes someBytes = 3;
}
message NestedObjectProto {
SimpleObjectProto object = 1;
string nestedName = 2;
bytes nestedSomeBytes = 3;
}
message CustomObjectProto {
int64 test = 1;
}
message AbstractProto {
oneof obj {
NestedObjectProto nestedObject = 1;
SimpleObjectProto simpleObject = 2;
CustomObjectProto customObject = 3;
}
}
message RecordObjectProto {
string key = 1;
}
message RecordObject2Proto {
string key = 1;
int32 value = 2;
}
message InterfaceObjectProto {
oneof obj {
RecordObjectProto recordObject = 1;
RecordObject2Proto recordObject2 = 2;
}
}


@@ -0,0 +1 @@
quarkus.package.jar.decompiler.enabled=true


@@ -0,0 +1,7 @@
package com.usatiuk.autoprotomap.it;
import io.quarkus.test.junit.QuarkusIntegrationTest;
@QuarkusIntegrationTest
public class AutoprotomapResourceIT extends AutoprotomapResourceTest {
}


@@ -0,0 +1,113 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.test.junit.QuarkusTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
@QuarkusTest
public class AutoprotomapResourceTest {
@Inject
ProtoSerializer<SimpleObjectProto, SimpleObject> simpleProtoSerializer;
@Inject
ProtoSerializer<NestedObjectProto, NestedObject> nestedProtoSerializer;
@Inject
ProtoSerializer<AbstractProto, AbstractObject> abstractProtoSerializer;
@Inject
ProtoSerializer<InterfaceObjectProto, InterfaceObject> interfaceProtoSerializer;
@Test
public void testSimple() {
var ret = simpleProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
Assertions.assertEquals(1234, ret.getNumfield());
Assertions.assertEquals("simple test", ret.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSomeBytes());
var des = simpleProtoSerializer.deserialize(ret);
Assertions.assertEquals(1234, des.getNumfield());
Assertions.assertEquals("simple test", des.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
}
@Test
public void testNested() {
var ret = nestedProtoSerializer.serialize(
new NestedObject(
new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
"nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
Assertions.assertEquals(333, ret.getObject().getNumfield());
Assertions.assertEquals("nested so", ret.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", ret.getNestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedSomeBytes());
var des = nestedProtoSerializer.deserialize(ret);
Assertions.assertEquals(333, des.object.numfield);
Assertions.assertEquals(333, des.getObject().getNumfield());
Assertions.assertEquals("nested so", des.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", des.get_nestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
}
@Test
public void testAbstractSimple() {
var ret = abstractProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
Assertions.assertEquals(1234, ret.getSimpleObject().getNumfield());
Assertions.assertEquals("simple test", ret.getSimpleObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSimpleObject().getSomeBytes());
var des = (SimpleObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(1234, des.getNumfield());
Assertions.assertEquals("simple test", des.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
}
@Test
public void testAbstractCustom() {
var ret = abstractProtoSerializer.serialize(new CustomObject(1234));
Assertions.assertEquals(1, ret.getCustomObject().getTest());
var des = (CustomObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(2, des.getTestNum());
}
@Test
public void testAbstractNested() {
var ret = abstractProtoSerializer.serialize(
new NestedObject(
new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
"nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
Assertions.assertEquals(333, ret.getNestedObject().getObject().getNumfield());
Assertions.assertEquals("nested so", ret.getNestedObject().getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getNestedObject().getObject().getSomeBytes());
Assertions.assertEquals("nested obj", ret.getNestedObject().getNestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedObject().getNestedSomeBytes());
var des = (NestedObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(333, des.object.numfield);
Assertions.assertEquals(333, des.getObject().getNumfield());
Assertions.assertEquals("nested so", des.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", des.get_nestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
}
@Test
public void testInterface() {
var ret = interfaceProtoSerializer.serialize(new RecordObject("record test"));
Assertions.assertEquals("record test", ret.getRecordObject().getKey());
var des = (RecordObject) interfaceProtoSerializer.deserialize(ret);
Assertions.assertEquals("record test", des.key());
var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234));
Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey());
Assertions.assertEquals(1234, ret2.getRecordObject2().getValue());
var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2);
Assertions.assertEquals("record test 2", des2.key());
Assertions.assertEquals(1234, des2.value());
}
}


@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>Autoprotomap - Parent</name>
<modules>
<module>deployment</module>
<module>runtime</module>
<module>integration-tests</module>
</modules>
</project>


@@ -0,0 +1,63 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap</artifactId>
<name>Autoprotomap - Runtime</name>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>extension-descriptor</goal>
</goals>
<configuration>
<deployment>${project.groupId}:${project.artifactId}-deployment:${project.version}
</deployment>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<execution>
<id>default-compile</id>
<configuration>
<annotationProcessorPaths>
<path>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-processor</artifactId>
<version>${quarkus.platform.version}</version>
</path>
</annotationProcessorPaths>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>


@@ -0,0 +1,12 @@
package com.usatiuk.autoprotomap.runtime;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.CLASS)
@Target(ElementType.TYPE)
public @interface ProtoMirror {
Class<?> value() default Object.class;
}


@@ -1,4 +1,4 @@
package com.usatiuk.dhfs;
package com.usatiuk.autoprotomap.runtime;
import com.google.protobuf.Message;


@@ -0,0 +1,9 @@
name: Autoprotomap
#description: Do something useful.
metadata:
# keywords:
# - autoprotomap
# guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension
# categories:
# - "miscellaneous"
# status: "preview"


@@ -1,13 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
@Override
public int estimateSize() {
return data.size();
}
}


@@ -1,26 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.dhfs.persistence.ChunkDataP;
import com.usatiuk.dhfs.persistence.JObjectKeyP;
import com.usatiuk.objects.JObjectKey;
import jakarta.inject.Singleton;
@Singleton
public class ChunkDataProtoSerializer implements ProtoSerializer<ChunkDataP, ChunkData> {
@Override
public ChunkData deserialize(ChunkDataP message) {
return new ChunkData(
JObjectKey.of(message.getKey().getName()),
message.getData()
);
}
@Override
public ChunkDataP serialize(ChunkData object) {
return ChunkDataP.newBuilder()
.setKey(JObjectKeyP.newBuilder().setName(object.key().value()).build())
.setData(object.data())
.build();
}
}


@@ -1,51 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jmap.JMapHolder;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.Set;
public record File(JObjectKey key, long mode, long cTime, long mTime,
boolean symlink
) implements JDataRemote, JMapHolder<JMapLongKey> {
public File withSymlink(boolean symlink) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withMode(long mode) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withCTime(long cTime) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withMTime(long mTime) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withCurrentMTime() {
return new File(key, mode, cTime, System.currentTimeMillis(), symlink);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return Set.of();
// return Set.copyOf(chunks().values());
}
@Override
public int estimateSize() {
return 64;
// return chunks.size() * 64;
}
@Override
public Class<? extends JDataRemoteDto> dtoClass() {
return FileDto.class;
}
}


@@ -1,15 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
@Override
public Class<? extends JDataRemote> objClass() {
return File.class;
}
}


@@ -1,24 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.syncmap.DtoMapper;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
@ApplicationScoped
public class FileDtoMapper implements DtoMapper<File, FileDto> {
@Inject
JMapHelper jMapHelper;
@Inject
FileHelper fileHelper;
@Override
public FileDto toDto(File obj) {
return new FileDto(obj, fileHelper.getChunks(obj));
}
@Override
public File fromDto(FileDto dto) {
throw new UnsupportedOperationException();
}
}


@@ -1,36 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.objects.JObjectKey;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import java.util.ArrayList;
import java.util.List;
@ApplicationScoped
public class FileHelper {
@Inject
JMapHelper jMapHelper;
public List<Pair<Long, JObjectKey>> getChunks(File file) {
ArrayList<Pair<Long, JObjectKey>> chunks = new ArrayList<>();
try (var it = jMapHelper.getIterator(file)) {
while (it.hasNext()) {
var cur = it.next();
chunks.add(Pair.of(cur.getKey().key(), cur.getValue().ref()));
}
}
return List.copyOf(chunks);
}
public void replaceChunks(File file, List<Pair<Long, JObjectKey>> chunks) {
jMapHelper.deleteAll(file);
for (var f : chunks) {
jMapHelper.put(file, JMapLongKey.of(f.getLeft()), f.getRight());
}
}
}


@@ -1,25 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.dhfs.persistence.FileDtoP;
import com.usatiuk.utils.SerializationHelper;
import jakarta.inject.Singleton;
import java.io.IOException;
@Singleton
public class FileProtoSerializer implements ProtoSerializer<FileDtoP, FileDto> {
@Override
public FileDto deserialize(FileDtoP message) {
try (var is = message.getSerializedData().newInput()) {
return SerializationHelper.deserialize(is);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public FileDtoP serialize(FileDto object) {
return FileDtoP.newBuilder().setSerializedData(SerializationHelper.serialize(object)).build();
}
}


@@ -1,236 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
import com.usatiuk.dhfs.remoteobj.*;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.pcollections.HashPMap;
import org.pcollections.HashTreePMap;
import org.pcollections.PMap;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Objects;
@ApplicationScoped
public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
@Inject
Transaction curTx;
@Inject
PersistentPeerDataService persistentPeerDataService;
@Inject
JMapHelper jMapHelper;
@Inject
RemoteTransaction remoteTx;
@Inject
FileHelper fileHelper;
@Inject
JKleppmannTreeManager jKleppmannTreeManager;
@Inject
DhfsFileService fileService;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs")).orElseThrow();
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC).orElseThrow();
}
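// Conflict resolution is last-writer-wins on mTime: the newer version keeps the
// original object key, while the losing version is written under a fresh key and
// linked into the tree as "<name>.fconflict.<selfUuid>.<otherUuid>.<n>".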
private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
@Nullable FileDto receivedData) {
var oursCurMeta = curTx.get(RemoteObjectMeta.class, key).orElse(null);
if (!oursCurMeta.knownType().isAssignableFrom(File.class))
throw new IllegalStateException("Object type mismatch: " + oursCurMeta.knownType() + " vs " + File.class);
if (!oursCurMeta.knownType().equals(File.class))
oursCurMeta = oursCurMeta.withKnownType(File.class);
curTx.put(oursCurMeta);
var oursCurFile = remoteTx.getDataLocal(File.class, key).orElse(null);
if (oursCurFile == null)
throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
var theirsFile = receivedData.file();
var oursChunks = fileHelper.getChunks(oursCurFile);
File first;
File second;
List<Pair<Long, JObjectKey>> firstChunks;
List<Pair<Long, JObjectKey>> secondChunks;
PeerId otherHostname;
if (oursCurFile.mTime() >= theirsFile.mTime()) {
first = oursCurFile;
firstChunks = oursChunks;
second = theirsFile;
secondChunks = receivedData.chunks();
otherHostname = from;
} else {
second = oursCurFile;
secondChunks = oursChunks;
first = theirsFile;
firstChunks = receivedData.chunks();
otherHostname = persistentPeerDataService.getSelfUuid();
}
Log.tracev("Conflict resolution: ours: {0}, theirs: {1}, chunks: {2}, {3}", oursCurFile, theirsFile, oursChunks, receivedData.chunks());
Log.tracev("Conflict resolution: first: {0}, second: {1}, chunks: {2}, {3}", first, second, firstChunks, secondChunks);
HashPMap<PeerId, Long> newChangelog = HashTreePMap.from(oursCurMeta.changelog());
for (var entry : receivedChangelog.entrySet()) {
newChangelog = newChangelog.plus(entry.getKey(),
Long.max(newChangelog.getOrDefault(entry.getKey(), 0L), entry.getValue())
);
}
oursCurMeta = oursCurMeta.withChangelog(newChangelog);
curTx.put(oursCurMeta);
boolean chunksDiff = !Objects.equals(firstChunks, secondChunks);
boolean wasChanged = first.mTime() != second.mTime()
|| first.cTime() != second.cTime()
|| first.mode() != second.mode()
|| first.symlink() != second.symlink()
|| chunksDiff;
if (wasChanged) {
oursCurMeta = oursCurMeta.withChangelog(
newChangelog.plus(persistentPeerDataService.getSelfUuid(), newChangelog.getOrDefault(persistentPeerDataService.getSelfUuid(), 0L) + 1)
);
curTx.put(oursCurMeta);
remoteTx.putDataRaw(oursCurFile.withCTime(first.cTime()).withMTime(first.mTime()).withMode(first.mode()).withSymlink(first.symlink()));
fileHelper.replaceChunks(oursCurFile, firstChunks);
var newFile = new File(JObjectKey.random(), second.mode(), second.cTime(), second.mTime(), second.symlink());
remoteTx.putData(newFile);
fileHelper.replaceChunks(newFile, secondChunks);
var parent = fileService.inoToParent(oursCurFile.key());
int i = 0;
do {
try {
getTreeW().move(parent.getRight(),
new JKleppmannTreeNodeMetaFile(
parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + otherHostname.toString() + "." + i,
newFile.key()
),
getTreeW().getNewNodeId()
);
} catch (AlreadyExistsException aex) {
i++;
continue;
}
break;
} while (true);
}
var curKnownRemoteVersion = oursCurMeta.knownRemoteVersions().get(from);
var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();
if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
oursCurMeta = oursCurMeta.withKnownRemoteVersions(oursCurMeta.knownRemoteVersions().plus(from, receivedTotalVer));
curTx.put(oursCurMeta);
}
}
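// handleRemoteUpdate merges the received changelog (a per-peer version vector)
// into the local one: EQUAL stores data that was missing locally, NEWER
// overwrites local state, OLDER is ignored, and CONFLICT is delegated to
// resolveConflict above.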
@Override
public void handleRemoteUpdate(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
@Nullable FileDto receivedData) {
var current = curTx.get(RemoteObjectMeta.class, key).orElse(null);
if (current == null) {
current = new RemoteObjectMeta(key, HashTreePMap.empty());
curTx.put(current);
}
var changelogCompare = SyncHelper.compareChangelogs(current.changelog(), receivedChangelog);
switch (changelogCompare) {
case EQUAL -> {
Log.debug("No action on update: " + key + " from " + from);
if (!current.hasLocalData() && receivedData != null) {
current = current.withHaveLocal(true);
curTx.put(current);
curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
.map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));
if (!current.knownType().isAssignableFrom(File.class))
throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);
if (!current.knownType().equals(File.class))
current = current.withKnownType(File.class);
curTx.put(current);
fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
}
}
case NEWER -> {
Log.debug("Received newer index update than known: " + key + " from " + from);
var newChangelog = receivedChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ?
receivedChangelog : receivedChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L);
current = current.withChangelog(newChangelog);
if (receivedData != null) {
current = current.withHaveLocal(true);
curTx.put(current);
curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
.map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));
if (!current.knownType().isAssignableFrom(File.class))
throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);
if (!current.knownType().equals(File.class))
current = current.withKnownType(File.class);
curTx.put(current);
fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
} else {
current = current.withHaveLocal(false);
curTx.put(current);
}
}
case OLDER -> {
Log.debug("Received older index update than known: " + key + " from " + from);
return;
}
case CONFLICT -> {
Log.debug("Conflict on update (inconsistent version): " + key + " from " + from);
assert receivedData != null;
resolveConflict(from, key, receivedChangelog, receivedData);
// TODO:
return;
}
}
var curKnownRemoteVersion = current.knownRemoteVersions().get(from);
var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();
if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
current = current.withKnownRemoteVersions(current.knownRemoteVersions().plus(from, receivedTotalVer));
curTx.put(current);
}
}
}
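
A note on the two rules the handlers above rely on: changelogs merge entry-wise by taking the per-peer maximum (the loop at the top of resolveConflict), and two changelogs compare as EQUAL, NEWER, OLDER, or CONFLICT depending on which side is ahead for at least one peer. A minimal sketch of both rules, using plain maps and strings as illustrative stand-ins for the project's HashPMap, PeerId, and SyncHelper types:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

public class ChangelogSketch {
    enum Order { EQUAL, NEWER, OLDER, CONFLICT }

    // Entry-wise maximum, mirroring the merge loop in resolveConflict above.
    static Map<String, Long> merge(Map<String, Long> ours, Map<String, Long> theirs) {
        var out = new HashMap<>(ours);
        theirs.forEach((peer, ver) -> out.merge(peer, ver, Long::max));
        return out;
    }

    // NEWER: the received changelog dominates ours; CONFLICT: neither dominates.
    static Order compare(Map<String, Long> ours, Map<String, Long> theirs) {
        var allPeers = new HashSet<>(ours.keySet());
        allPeers.addAll(theirs.keySet());
        boolean oursAhead = false, theirsAhead = false;
        for (var peer : allPeers) {
            long o = ours.getOrDefault(peer, 0L);
            long t = theirs.getOrDefault(peer, 0L);
            if (o > t) oursAhead = true;
            if (t > o) theirsAhead = true;
        }
        if (oursAhead && theirsAhead) return Order.CONFLICT;
        if (theirsAhead) return Order.NEWER;
        if (oursAhead) return Order.OLDER;
        return Order.EQUAL;
    }

    public static void main(String[] args) {
        var ours = Map.of("peerA", 3L, "peerB", 1L);
        var theirs = Map.of("peerA", 2L, "peerB", 2L);
        System.out.println(compare(ours, theirs)); // CONFLICT: each side is ahead somewhere
        System.out.println(merge(ours, theirs));   // {peerA=3, peerB=2} (order may vary)
    }
}

In handleRemoteUpdate above, NEWER adopts the received changelog, OLDER is ignored, and CONFLICT falls through to resolveConflict.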

View File

@@ -1,18 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaDirectory(name);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of();
}
}

View File

@@ -1,19 +0,0 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
@Override
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaFile(name, fileIno);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of(fileIno);
}
}
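
The two meta records above are all the tree stores per node besides structure: a file node carries the key of the inode object it names, so collectRefsTo keeps that object reachable, while a directory contributes no outgoing references. A small illustrative sketch (assumes these classes are on the classpath; JObjectKey.random() is used the same way in resolveConflict above):

import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
import com.usatiuk.objects.JObjectKey;

public class MetaRefsSketch {
    public static void main(String[] args) {
        var fileIno = JObjectKey.random();
        var fileMeta = new JKleppmannTreeNodeMetaFile("notes.txt", fileIno);
        var dirMeta = new JKleppmannTreeNodeMetaDirectory("docs");
        // A file node pins the inode it names; a directory pins nothing.
        System.out.println(fileMeta.collectRefsTo()); // [<fileIno>]
        System.out.println(dirMeta.collectRefsTo());  // []
    }
}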

View File

@@ -1,29 +0,0 @@
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTestProfile;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
Path tempDirWithPrefix;
try {
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
} catch (IOException e) {
throw new RuntimeException(e);
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;
}
}
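
TempDataProfile is meant to be subclassed once per test configuration: the base class provisions fresh temp directories for the object store and FUSE roots, and subclasses layer extra properties on top via the protected hook. A hypothetical subclass (the overridden value is just an example; the property itself appears in the config files in this commit):

import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import java.util.Map;

// Hypothetical profile: inherits the temp-dir wiring, adds one override.
class NoDeletionDelayProfile extends TempDataProfile {
    @Override
    protected void getConfigOverrides(Map<String, String> toPut) {
        toPut.put("dhfs.objects.deletion.delay", "0");
    }
}

@QuarkusTest
@TestProfile(NoDeletionDelayProfile.class)
class SomeFsTest {
    // test methods using the profile's temp directories would go here
}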

View File

@@ -1,172 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fuse</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>net.openhft</groupId>
<artifactId>zero-allocation-hashing</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.serceman</groupId>
<artifactId>jnr-fuse</artifactId>
<version>0.5.8</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,35 +0,0 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.objects.periodic-push-op-interval=5m
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=524288
dhfs.files.max_chunk_size=524288
dhfs.files.target_chunk_alignment=17
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required

View File

@@ -1,40 +0,0 @@
package com.usatiuk.dhfsfuse;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
@ApplicationScoped
public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
} catch (Exception ignored) {
Log.warn("Couldn't cleanup test data on init");
}
}
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}
public static void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
}
}
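
purgeDirectory empties a directory recursively but keeps the top-level directory itself, which is what the startup/shutdown hooks above need: the configured root stays valid across restarts. A hedged usage sketch (assumes TestDataCleaner is on the classpath):

import com.usatiuk.dhfsfuse.TestDataCleaner;
import java.nio.file.Files;

public class PurgeSketch {
    public static void main(String[] args) throws Exception {
        var dir = Files.createTempDirectory("purge-demo");
        Files.createDirectories(dir.resolve("a/b"));
        Files.writeString(dir.resolve("a/b/x.txt"), "data");
        TestDataCleaner.purgeDirectory(dir.toFile());
        try (var children = Files.list(dir)) {
            System.out.println(children.count()); // 0: contents gone, dir itself kept
        }
    }
}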

View File

@@ -1,235 +0,0 @@
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
public class KillIT {
GenericContainer<?> container1;
GenericContainer<?> container2;
WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;
String c1uuid;
String c2uuid;
File data1;
File data2;
Network network;
ExecutorService executor;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("").toFile();
data2 = Files.createTempDirectory("").toFile();
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data2);
executor.close();
network.close();
}
private void checkConsistency() {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
@Test
void killTest(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTestDirs(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTest2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTestDirs2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
}
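
All four tests end in the same convergence poll: sample ls and cat on both mounts until the outputs (and exit codes) agree. Distilled into a hedged helper with an illustrative Supplier-based signature (not the project's API):

import static org.awaitility.Awaitility.await;

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class ConvergenceSketch {
    // Poll two snapshots of replica state until they agree, or fail after 45s.
    static void awaitConverged(Supplier<String> viewA, Supplier<String> viewB) {
        await().atMost(45, TimeUnit.SECONDS)
                .until(() -> viewA.get().equals(viewB.get()));
    }
}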

View File

@@ -1,215 +0,0 @@
package com.usatiuk.dhfsfuse.integration;
import io.quarkus.logging.Log;
import java.io.*;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
public class LazyFs {
private static final String lazyFsPath;
static {
lazyFsPath = System.getProperty("lazyFsPath");
System.out.println("LazyFs Path: " + lazyFsPath);
}
private final String mountRoot;
private final String dataRoot;
private final String name;
private final File configFile;
private final File fifoFile;
private Thread errPiper;
private Thread outPiper;
private CountDownLatch startLatch;
private Process fs;
public LazyFs(String name, String mountRoot, String dataRoot) {
this.name = name;
this.mountRoot = mountRoot;
this.dataRoot = dataRoot;
try {
configFile = File.createTempFile("lazyfs", ".conf");
configFile.deleteOnExit();
fifoFile = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
fifoFile.deleteOnExit();
} catch (IOException e) {
throw new RuntimeException(e);
}
Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
}
private String fifoPath() {
return fifoFile.getAbsolutePath();
}
public void start(String extraOpts) {
var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
if (!lfsPath.toFile().isFile())
throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
if (!lfsPath.toFile().canExecute())
throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());
try (var rwFile = new RandomAccessFile(configFile, "rw");
var channel = rwFile.getChannel()) {
channel.truncate(0);
var config = "[faults]\n" +
"fifo_path=\"" + fifoPath() + "\"\n" +
"[cache]\n" +
"apply_eviction=false\n" +
"[cache.simple]\n" +
"custom_size=\"1gb\"\n" +
"blocks_per_page=1\n" +
"[filesystem]\n" +
"log_all_operations=false\n" +
"logfile=\"\"\n" + extraOpts;
rwFile.write(config.getBytes());
Log.info("LazyFs config: \n" + config);
} catch (Exception e) {
throw new RuntimeException(e);
}
var argList = new ArrayList<String>();
argList.add(lfsPath.toString());
argList.add(Path.of(mountRoot).toString());
argList.add("--config-path");
argList.add(configFile.getAbsolutePath());
argList.add("-o");
argList.add("allow_other");
argList.add("-o");
argList.add("modules=subdir");
argList.add("-o");
argList.add("subdir=" + Path.of(dataRoot).toAbsolutePath().toString());
try {
Log.info("Starting LazyFs " + argList);
fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
} catch (Exception e) {
throw new RuntimeException(e);
}
startLatch = new CountDownLatch(1);
outPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
String line;
while ((line = input.readLine()) != null) {
if (line.contains("running LazyFS"))
startLatch.countDown();
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs out piper finished");
});
outPiper.start();
errPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
String line;
while ((line = input.readLine()) != null) {
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs err piper finished");
});
errPiper.start();
try {
if (!startLatch.await(30, TimeUnit.SECONDS))
throw new RuntimeException("StartLatch timed out");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
Log.info("LazyFs started");
}
public void start() {
start("");
}
private String mdbPath() {
return Path.of(dataRoot).resolve("objects").resolve("data.mdb").toAbsolutePath().toString();
}
// torn-op: tear a single write into parts, persisting only some of them.
public void startTornOp() {
start("[[injection]]\n" +
"type=\"torn-op\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"occurrence=3\n" +
"parts=3 #or parts_bytes=[4096,3600,1260]\n" +
"persist=[1,3]");
}
// torn-seq: persist only some writes out of a sequence of writes to the file.
public void startTornSeq() {
start("[[injection]]\n" +
"type=\"torn-seq\"\n" +
"op=\"write\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"persist=[1,4]\n" +
"occurrence=3");
}
public void crash() {
try {
var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
Log.info("Running command: " + cmd);
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd}).waitFor();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void stop() {
try {
synchronized (this) {
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + mountRoot}).waitFor();
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Doesn't actually work?
//
// public void crashop() {
// try {
// var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
//
// public void crashseq() {
// try {
// var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
}
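
The lifecycle intended here, as the integration test below shows: construct with a mount root and a backing data root, start (optionally with a fault-injection config), crash through the faults FIFO, unmount with stop, then restart with one of the torn-write modes. A hedged sketch (requires -DlazyFsPath pointing at a built LazyFS checkout and a working fuse3 install):

import java.nio.file.Files;

public class LazyFsSketch {
    public static void main(String[] args) throws Exception {
        var mount = Files.createTempDirectory("lazyfs-mount");
        var data = Files.createTempDirectory("lazyfs-data");
        var lazyFs = new LazyFs("demo", mount.toString(), data.toString());
        lazyFs.start();       // plain run, no fault injection
        // ... exercise the filesystem under `mount` ...
        lazyFs.crash();       // drop unsynced writes via the faults FIFO
        lazyFs.stop();        // fusermount3 -u
        lazyFs.startTornOp(); // remount with a torn-op injection on data.mdb
        lazyFs.stop();
    }
}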

View File

@@ -1,489 +0,0 @@
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
public class LazyFsIT {
GenericContainer<?> container1;
GenericContainer<?> container2;
WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;
String c1uuid;
String c2uuid;
File data1;
File data2;
File data1Lazy;
File data2Lazy;
LazyFs lazyFs1;
LazyFs lazyFs2;
ExecutorService executor;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("dhfsdata").toFile();
data2 = Files.createTempDirectory("dhfsdata").toFile();
data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
data2Lazy = Files.createTempDirectory("lazyfsroot").toFile();
network = Network.newNetwork();
lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
lazyFs1.start();
lazyFs2 = new LazyFs(testInfo.getDisplayName(), data2.toString(), data2Lazy.toString());
lazyFs2.start();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
lazyFs1.stop();
lazyFs2.stop();
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data1Lazy);
TestDataCleaner.purgeDirectory(data2);
TestDataCleaner.purgeDirectory(data2Lazy);
executor.close();
network.close();
}
private void checkConsistency(String testName) {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info("Listing consistency " + testName + "\n"
+ ls1 + "\n"
+ cat1 + "\n"
+ ls2 + "\n"
+ cat2 + "\n");
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Log.info("Killing");
Thread.sleep(3000);
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Thread.sleep(3000);
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
private enum CrashType {
CRASH,
TORN_OP,
TORN_SEQ
}
}

View File

@@ -1,11 +0,0 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true

View File

@@ -18,11 +18,6 @@
<artifactId>junit-jupiter-engine</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
@@ -35,9 +30,5 @@
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>jakarta.annotation</groupId>
<artifactId>jakarta.annotation-api</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -15,6 +15,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
private final PeerInterface<PeerIdT> _peers;
private final Clock<TimestampT> _clock;
private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;
public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
PeerInterface<PeerIdT> peers,
@@ -52,20 +53,18 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
var node = _storage.getById(effect.childId());
var curParent = _storage.getById(effect.newParentId());
{
var newCurParentChildren = curParent.children().minus(node.name());
var newCurParentChildren = curParent.children().minus(node.meta().getName());
curParent = curParent.withChildren(newCurParentChildren);
_storage.putNode(curParent);
}
if (effect.oldInfo().oldMeta() != null
&& node.meta() != null
&& !node.meta().getClass().equals(effect.oldInfo().oldMeta().getClass()))
if (!node.meta().getClass().equals(effect.oldInfo().oldMeta().getClass()))
throw new IllegalArgumentException("Class mismatch for meta for node " + node.key());
// Needs to be read after changing curParent, as it might be the same node
var oldParent = _storage.getById(effect.oldInfo().oldParent());
{
var newOldParentChildren = oldParent.children().plus(effect.oldName(), node.key());
var newOldParentChildren = oldParent.children().plus(node.meta().getName(), node.key());
oldParent = oldParent.withChildren(newOldParentChildren);
_storage.putNode(oldParent);
}
@@ -78,7 +77,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
var node = _storage.getById(effect.childId());
var curParent = _storage.getById(effect.newParentId());
{
var newCurParentChildren = curParent.children().minus(node.name());
var newCurParentChildren = curParent.children().minus(node.meta().getName());
curParent = curParent.withChildren(newCurParentChildren);
_storage.putNode(curParent);
}
@@ -86,11 +85,11 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
node.withParent(null)
.withLastEffectiveOp(null)
);
_undoCtx.put(node.key(), node);
}
}
private void undoOp(LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
LOGGER.finer(() -> "Will undo op: " + op);
if (op.effects() != null)
for (var e : op.effects().reversed())
undoEffect(e);
@@ -141,8 +140,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
}
}
}
}
if (!inTrash.isEmpty()) {
var trash = _storage.getById(_storage.getTrashId());
for (var n : inTrash) {
@@ -167,8 +166,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
public void move(NodeIdT newParent, MetaT newMeta, NodeIdT child, boolean failCreatingIfExists) {
var createdMove = createMove(newParent, newMeta, child);
applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists);
_opRecorder.recordOp(createdMove);
applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists);
}
public void applyExternalOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
@@ -179,7 +178,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
// Returns true if the timestamp is newer than what's seen, false otherwise
private boolean updateTimestampImpl(PeerIdT from, TimestampT newTimestamp) {
TimestampT oldRef = _storage.getPeerTimestampLog().getForPeer(from);
if (oldRef != null && oldRef.compareTo(newTimestamp) >= 0) { // FIXME?
if (oldRef != null && oldRef.compareTo(newTimestamp) > 0) { // FIXME?
LOGGER.warning("Wrong op order: received older than known from " + from.toString());
return false;
}
@@ -187,20 +186,20 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return true;
}
public void updateExternalTimestamp(PeerIdT from, TimestampT timestamp) {
public boolean updateExternalTimestamp(PeerIdT from, TimestampT timestamp) {
// TODO: Ideally no point in this separate locking?
var gotExt = _storage.getPeerTimestampLog().getForPeer(from);
var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId());
if (!(gotExt != null && gotExt.compareTo(timestamp) >= 0))
updateTimestampImpl(from, timestamp);
if (!(gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0))
updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack?
if ((gotExt != null && gotExt.compareTo(timestamp) >= 0)
&& (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false;
updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack?
updateTimestampImpl(from, timestamp);
tryTrimLog();
return true;
}
private void applyOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
if (!updateTimestampImpl(op.timestamp().nodeId(), op.timestamp().timestamp())) return;
LOGGER.finer(() -> "Will apply op: " + op + " from " + from);
if (!updateTimestampImpl(from, op.timestamp().timestamp())) return;
var log = _storage.getLog();
@@ -213,16 +212,31 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
assert cmp != 0;
if (cmp < 0) {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
try {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
_undoCtx = new HashMap<>();
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
}
try {
doAndPut(op, failCreatingIfExists);
} finally {
for (var entry : toUndo) {
redoOp(entry);
}
if (!_undoCtx.isEmpty()) {
for (var e : _undoCtx.entrySet()) {
LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
_storage.removeNode(e.getKey());
}
}
_undoCtx = null;
}
} finally {
tryTrimLog();
}
doAndPut(op, failCreatingIfExists);
for (var entry : toUndo) {
redoOp(entry);
}
tryTrimLog();
} else {
doAndPut(op, failCreatingIfExists);
tryTrimLog();
@@ -238,14 +252,14 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> doOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
LOGGER.finer(() -> "Doing op: " + op);
LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computed;
try {
computed = computeEffects(op, failCreatingIfExists);
} catch (AlreadyExistsException aex) {
throw aex;
} catch (Exception e) {
throw new RuntimeException("Error computing effects for op " + op.toString(), e);
LOGGER.log(Level.SEVERE, "Error computing effects for op " + op.toString(), e);
computed = new LogRecord<>(op, null);
}
if (computed.effects() != null)
@@ -254,12 +268,29 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
if (_undoCtx != null) {
var node = _undoCtx.get(key);
if (node != null) {
try {
if (!node.children().isEmpty()) {
LOGGER.log(Level.WARNING, "Undone node " + key + " still has children");
}
node = node.withParent(parent).withMeta(meta);
} catch (Exception e) {
LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
node = null;
}
}
if (node != null) {
_undoCtx.remove(key);
return node;
}
}
return _storage.createNewNode(key, parent, meta);
}
private void applyEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> sourceOp, List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {
for (var effect : effects) {
LOGGER.finer(() -> "Applying effect: " + effect + " from op " + sourceOp);
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> oldParentNode = null;
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> newParentNode;
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node;
@@ -273,7 +304,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
node = _storage.getById(effect.childId());
}
if (oldParentNode != null) {
var newOldParentChildren = oldParentNode.children().minus(effect.oldName());
var newOldParentChildren = oldParentNode.children().minus(effect.oldInfo().oldMeta().getName());
oldParentNode = oldParentNode.withChildren(newOldParentChildren);
_storage.putNode(oldParentNode);
}
@@ -282,12 +313,12 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
newParentNode = _storage.getById(effect.newParentId());
{
var newNewParentChildren = newParentNode.children().plus(effect.newName(), effect.childId());
var newNewParentChildren = newParentNode.children().plus(effect.newMeta().getName(), effect.childId());
newParentNode = newParentNode.withChildren(newNewParentChildren);
_storage.putNode(newParentNode);
}
if (effect.newParentId().equals(_storage.getTrashId()) &&
!Objects.equals(effect.newName(), effect.childId().toString()))
!Objects.equals(effect.newMeta().getName(), effect.childId().toString()))
throw new IllegalArgumentException("Move to trash should have id of node as name");
_storage.putNode(
node.withParent(effect.newParentId())
@@ -304,46 +335,32 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
NodeIdT newParentId = op.newParentId();
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> newParent = _storage.getById(newParentId);
if (newParent == null) {
LOGGER.log(Level.SEVERE, "New parent not found " + op.newName() + " " + op.childId());
// Creation
if (oldParentId == null) {
LOGGER.severe(() -> "Creating both dummy parent and child node");
return new LogRecord<>(op, List.of(
new LogEffect<>(null, op, _storage.getLostFoundId(), null, newParentId),
new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId())
));
} else {
LOGGER.severe(() -> "Moving child node to dummy parent");
return new LogRecord<>(op, List.of(
new LogEffect<>(null, op, _storage.getLostFoundId(), null, newParentId),
new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, node.meta()), op, op.newParentId(), op.newMeta(), op.childId())
));
}
LOGGER.log(Level.SEVERE, "New parent not found " + op.newMeta().getName() + " " + op.childId());
return new LogRecord<>(op, null);
}
if (oldParentId == null) {
var conflictNodeId = newParent.children().get(op.newName());
var conflictNodeId = newParent.children().get(op.newMeta().getName());
if (conflictNodeId != null) {
if (failCreatingIfExists)
throw new AlreadyExistsException("Already exists: " + op.newName() + ": " + conflictNodeId);
throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId);
var conflictNode = _storage.getById(conflictNodeId);
MetaT conflictNodeMeta = conflictNode.meta();
LOGGER.finer(() -> "Node creation conflict: " + conflictNode);
if (Objects.equals(conflictNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}
String newConflictNodeName = op.newName() + ".conflict." + conflictNode.key();
String newOursName = op.newName() + ".conflict." + op.childId();
String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.key();
String newOursName = op.newMeta().getName() + ".conflict." + op.childId();
return new LogRecord<>(op, List.of(
new LogEffect<>(new LogEffectOld<>(conflictNode.lastEffectiveOp(), newParentId, conflictNodeMeta), conflictNode.lastEffectiveOp(), newParentId, (MetaT) conflictNodeMeta.withName(newConflictNodeName), conflictNodeId),
new LogEffect<>(null, op, op.newParentId(), (MetaT) op.newMeta().withName(newOursName), op.childId())
));
} else {
LOGGER.finer(() -> "Simple node creation");
return new LogRecord<>(op, List.of(
new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId())
));
@@ -355,26 +372,24 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
MetaT oldMeta = node.meta();
if (oldMeta != null
&& op.newMeta() != null
&& !oldMeta.getClass().equals(op.newMeta().getClass())) {
throw new RuntimeException("Class mismatch for meta for node " + node.key());
if (!oldMeta.getClass().equals(op.newMeta().getClass())) {
LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
return new LogRecord<>(op, null);
}
var replaceNodeId = newParent.children().get(op.newName());
var replaceNodeId = newParent.children().get(op.newMeta().getName());
if (replaceNodeId != null) {
var replaceNode = _storage.getById(replaceNodeId);
var replaceNodeMeta = replaceNode.meta();
LOGGER.finer(() -> "Node replacement: " + replaceNode);
if (Objects.equals(replaceNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}
return new LogRecord<>(op, List.of(
new LogEffect<>(new LogEffectOld<>(replaceNode.lastEffectiveOp(), newParentId, replaceNodeMeta), replaceNode.lastEffectiveOp(), _storage.getTrashId(), (MetaT) replaceNodeMeta.withName(replaceNodeId.toString()), replaceNodeId),
new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
));
}
LOGGER.finer(() -> "Simple node move");
return new LogRecord<>(op, List.of(
new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
));
@@ -429,18 +444,18 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
walkTree(node -> {
var op = node.lastEffectiveOp();
if (node.lastEffectiveOp() == null) return;
LOGGER.info("visited bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newName() + " " + op.childId() + "->" + op.newParentId());
LOGGER.info("visited bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
result.put(node.lastEffectiveOp().timestamp(), node.lastEffectiveOp());
});
for (var le : _storage.getLog().getAll()) {
var op = le.getValue().op();
LOGGER.info("bootstrap op from log for " + host + ": " + op.timestamp().toString() + " " + op.newName() + " " + op.childId() + "->" + op.newParentId());
LOGGER.info("bootstrap op from log for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
result.put(le.getKey(), le.getValue().op());
}
for (var op : result.values()) {
LOGGER.info("Recording bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newName() + " " + op.childId() + "->" + op.newParentId());
LOGGER.info("Recording bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
_opRecorder.recordOpForPeer(host, op);
}
}

View File

@@ -8,17 +8,4 @@ public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT exten
NodeIdT newParentId,
MetaT newMeta,
NodeIdT childId) implements Serializable {
public String oldName() {
if (oldInfo.oldMeta() != null) {
return oldInfo.oldMeta().name();
}
return childId.toString();
}
public String newName() {
if (newMeta != null) {
return newMeta.name();
}
return childId.toString();
}
}

View File

@@ -3,7 +3,7 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
public interface NodeMeta extends Serializable {
String name();
String getName();
NodeMeta withName(String name);
}
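For reference, a conforming implementation after the name() -> getName() rename could look like this minimal sketch (FileMeta is a hypothetical example, not part of this change):

    record FileMeta(String name) implements NodeMeta {
        @Override
        public String getName() {
            return name;
        }

        @Override
        public NodeMeta withName(String name) {
            // Records are immutable, so renaming returns a new instance
            return new FileMeta(name);
        }
    }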

View File

@@ -5,9 +5,4 @@ import java.io.Serializable;
public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(CombinedTimestamp<TimestampT, PeerIdT> timestamp, NodeIdT newParentId, MetaT newMeta,
NodeIdT childId) implements Serializable {
public String newName() {
if (newMeta != null)
return newMeta.name();
return childId.toString();
}
}

View File

@@ -9,8 +9,6 @@ public interface StorageInterface<
NodeIdT getTrashId();
NodeIdT getLostFoundId();
NodeIdT getNewNodeId();
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getById(NodeIdT id);

View File

@@ -1,9 +1,9 @@
package com.usatiuk.kleppmanntree;
import jakarta.annotation.Nullable;
import org.pcollections.PMap;
import java.io.Serializable;
import java.util.Map;
public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> extends Serializable {
NodeIdT key();
@@ -12,15 +12,8 @@ public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT ext
OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp();
@Nullable
MetaT meta();
default String name() {
var meta = meta();
if (meta != null) return meta.name();
return key().toString();
}
PMap<String, NodeIdT> children();
TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withParent(NodeIdT parent);

View File

@@ -2,15 +2,13 @@ package com.usatiuk.kleppmanntree;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import java.util.List;
public class KleppmanTreeSimpleTest {
private final TestNode testNode1 = new TestNode(1);
private final TestNode testNode2 = new TestNode(2);
private final TestNode testNode3 = new TestNode(3);
@Test
void circularTest() {
@@ -91,75 +89,4 @@ public class KleppmanTreeSimpleTest {
Assertions.assertTrue(testNode2._storageInterface.getLog().size() <= 1);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
void undoWithRenameTest(boolean opOrder) {
var d1id = testNode1._storageInterface.getNewNodeId();
var d2id = testNode2._storageInterface.getNewNodeId();
var d3id = testNode3._storageInterface.getNewNodeId();
testNode1._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d1id);
testNode2._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d2id);
testNode3._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d3id);
var r1 = testNode1.getRecorded();
var r2 = testNode2.getRecorded();
var r3 = testNode3.getRecorded();
Assertions.assertEquals(1, r1.size());
Assertions.assertEquals(1, r2.size());
Assertions.assertEquals(1, r3.size());
if (opOrder) {
testNode2._tree.applyExternalOp(3L, r3.getFirst());
testNode2._tree.applyExternalOp(1L, r1.getFirst());
} else {
testNode2._tree.applyExternalOp(1L, r1.getFirst());
testNode2._tree.applyExternalOp(3L, r3.getFirst());
}
Assertions.assertIterableEquals(List.of("Test1", "Test1.conflict." + d1id, "Test1.conflict." + d2id), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
if (opOrder) {
testNode1._tree.applyExternalOp(3L, r3.getFirst());
testNode1._tree.applyExternalOp(2L, r2.getFirst());
} else {
testNode1._tree.applyExternalOp(2L, r2.getFirst());
testNode1._tree.applyExternalOp(3L, r3.getFirst());
}
Assertions.assertIterableEquals(List.of("Test1", "Test1.conflict." + d1id, "Test1.conflict." + d2id), testNode1._storageInterface.getById(testNode1._storageInterface.getRootId()).children().keySet());
if (opOrder) {
testNode3._tree.applyExternalOp(2L, r2.getFirst());
testNode3._tree.applyExternalOp(1L, r1.getFirst());
} else {
testNode3._tree.applyExternalOp(1L, r1.getFirst());
testNode3._tree.applyExternalOp(2L, r2.getFirst());
}
Assertions.assertIterableEquals(List.of("Test1", "Test1.conflict." + d1id, "Test1.conflict." + d2id), testNode3._storageInterface.getById(testNode3._storageInterface.getRootId()).children().keySet());
}
@Test
void noFailedOpRecordTest() {
var d1id = testNode1._storageInterface.getNewNodeId();
var d2id = testNode1._storageInterface.getNewNodeId();
testNode1._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d1id);
Assertions.assertThrows(AlreadyExistsException.class, () -> testNode1._tree.move(testNode1._storageInterface.getRootId(), new TestNodeMetaDir("Test1"), d2id));
var r1 = testNode1.getRecorded();
Assertions.assertEquals(1, r1.size());
}
@Test
void externalOpWithDummy() {
Long d1id = testNode1._storageInterface.getNewNodeId();
Long f1id = testNode1._storageInterface.getNewNodeId();
testNode1._tree.applyExternalOp(2L, new OpMove<>(
new CombinedTimestamp<>(2L, 2L), d1id, new TestNodeMetaFile("Hi", 123), f1id
));
testNode1._tree.applyExternalOp(2L, new OpMove<>(
new CombinedTimestamp<>(3L, 2L), testNode1._storageInterface.getRootId(), new TestNodeMetaDir("HiDir"), d1id
));
Assertions.assertEquals(f1id, testNode1._tree.traverse(List.of("HiDir", "Hi")));
}
}

View File

@@ -8,7 +8,7 @@ public abstract class TestNodeMeta implements NodeMeta {
}
@Override
public String name() {
public String getName() {
return _name;
}

View File

@@ -14,7 +14,6 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
_peerId = peerId;
_nodes.put(getRootId(), new TestTreeNode(getRootId(), null, null));
_nodes.put(getTrashId(), new TestTreeNode(getTrashId(), null, null));
_nodes.put(getLostFoundId(), new TestTreeNode(getLostFoundId(), null, null));
}
@Override
@@ -27,11 +26,6 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
return -1L;
}
@Override
public Long getLostFoundId() {
return -2L;
}
@Override
public Long getNewNodeId() {
return _curId++ | _peerId << 32;

View File

@@ -18,11 +18,6 @@
</properties>
<dependencies>
<dependency>
<groupId>net.jqwik</groupId>
<artifactId>jqwik</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
@@ -59,6 +54,11 @@
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>supportlib</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5-mockito</artifactId>
@@ -88,11 +88,6 @@
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
@@ -104,6 +99,7 @@
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>

View File

@@ -0,0 +1,28 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import org.apache.commons.lang3.tuple.Pair;
import java.util.Iterator;
public interface CloseableKvIterator<K extends Comparable<K>, V> extends Iterator<Pair<K, V>>, AutoCloseableNoThrow {
K peekNextKey();
Class<?> peekNextType();
void skip();
K peekPrevKey();
Class<?> peekPrevType();
Pair<K, V> prev();
boolean hasPrev();
void skipPrev();
default CloseableKvIterator<K, V> reversed() {
return new ReversedKvIterator<>(this);
}
}
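A hedged usage sketch of the bidirectional contract, assuming NavigableMapKvIterator (shown later in this diff) as the backing source and that AutoCloseableNoThrow extends AutoCloseable:

    NavigableMap<String, Integer> map = new TreeMap<>(Map.of("a", 1, "b", 2));
    try (CloseableKvIterator<String, Integer> it =
            new NavigableMapKvIterator<>(map, IteratorStart.GE, "a")) {
        while (it.hasNext()) {
            Pair<String, Integer> e = it.next(); // ("a", 1), then ("b", 2)
        }
        // reversed() flips direction over the same cursor position:
        var back = it.reversed();
        while (back.hasNext()) {
            Pair<String, Integer> e = back.next(); // ("b", 2), then ("a", 1)
        }
    }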

View File

@@ -1,15 +1,18 @@
package com.usatiuk.objects.transaction;
package com.usatiuk.dhfs.objects;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.transaction.LockingStrategy;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import org.apache.commons.lang3.tuple.Pair;
import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Iterator;
import java.util.Optional;
@Singleton
@ApplicationScoped
public class CurrentTransaction implements Transaction {
@Inject
TransactionManager transactionManager;
@@ -34,6 +37,12 @@ public class CurrentTransaction implements Transaction {
transactionManager.current().delete(key);
}
@Nonnull
@Override
public Collection<JObjectKey> findAllObjects() {
return transactionManager.current().findAllObjects();
}
@Override
public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
return transactionManager.current().getIterator(start, key);
@@ -43,9 +52,4 @@ public class CurrentTransaction implements Transaction {
public <T extends JData> void put(JData obj) {
transactionManager.current().put(obj);
}
@Override
public <T extends JData> void putNew(JData obj) {
transactionManager.current().putNew(obj);
}
}

View File

@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects;
import java.util.Optional;
public record Data<V>(V value) implements MaybeTombstone<V> {
@Override
public Optional<V> opt() {
return Optional.of(value);
}
}

View File

@@ -0,0 +1,8 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
@FunctionalInterface
public interface IterProdFn<K extends Comparable<K>, V> {
CloseableKvIterator<K, V> get(IteratorStart start, K key);
}

View File

@@ -0,0 +1,16 @@
package com.usatiuk.dhfs.objects;
import java.io.Serializable;
// TODO: This could maybe be moved to a separate module?
// The base interface for JObject data.
// Only one instance of this "exists" per key; the instance in the manager is canonical.
// When committing a transaction, the instance is checked against the canonical one; if they differ, a race occurred.
// It is immutable; its version is filled in by the allocator from the AllocVersionProvider.
public interface JData extends Serializable {
JObjectKey key();
default int estimateSize() {
return 100;
}
}

View File

@@ -0,0 +1,9 @@
package com.usatiuk.dhfs.objects;
public interface JDataVersionedWrapper {
JData data();
long version();
int estimateSize();
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.objects;
package com.usatiuk.dhfs.objects;
import jakarta.annotation.Nonnull;

View File

@@ -0,0 +1,44 @@
package com.usatiuk.dhfs.objects;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.utils.SerializationHelper;
public class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
private final long _version;
private ByteString _rawData;
private JData _data;
public JDataVersionedWrapperLazy(long version, ByteString rawData) {
_version = version;
_rawData = rawData;
}
public JData data() {
if (_data != null)
return _data;
synchronized (this) {
if (_data != null)
return _data;
try (var is = _rawData.newInput()) {
_data = SerializationHelper.deserialize(is);
} catch (Exception e) {
throw new RuntimeException(e);
}
_rawData = null;
return _data;
}
}
public long version() {
return _version;
}
@Override
public int estimateSize() {
if (_data != null)
return _data.estimateSize();
return _rawData.size();
}
}
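A hedged usage sketch: the wrapper holds raw bytes until data() is first called, then deserializes exactly once (double-checked under synchronized) and drops the buffer; serializedBytes here stands in for a Java-serialized JData payload:

    JDataVersionedWrapper w = new JDataVersionedWrapperLazy(42L, serializedBytes);
    int sizeBefore = w.estimateSize(); // raw byte count: _rawData.size()
    JData d = w.data();                // deserializes once, frees _rawData
    int sizeAfter = w.estimateSize();  // now delegates to d.estimateSize()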

View File

@@ -0,0 +1,44 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
public record JObjectKey(String name) implements Serializable, Comparable<JObjectKey> {
public static JObjectKey of(String name) {
return new JObjectKey(name);
}
@Override
public int compareTo(JObjectKey o) {
return name.compareTo(o.name);
}
@Override
public String toString() {
return name;
}
public byte[] bytes() {
return name.getBytes(StandardCharsets.UTF_8);
}
public ByteBuffer toByteBuffer() {
var heapBb = StandardCharsets.UTF_8.encode(name);
if (heapBb.isDirect()) return heapBb;
var directBb = UninitializedByteBuffer.allocateUninitialized(heapBb.remaining());
directBb.put(heapBb);
directBb.flip();
return directBb;
}
public static JObjectKey fromBytes(byte[] bytes) {
return new JObjectKey(new String(bytes, StandardCharsets.UTF_8));
}
public static JObjectKey fromByteBuffer(ByteBuffer buff) {
return new JObjectKey(StandardCharsets.UTF_8.decode(buff).toString());
}
}
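Both encodings round-trip (the key string below is arbitrary); a quick sketch:

    var key = JObjectKey.of("objects/foo");
    assert JObjectKey.fromBytes(key.bytes()).equals(key);
    assert JObjectKey.fromByteBuffer(key.toByteBuffer()).equals(key);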

View File

@@ -0,0 +1,239 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.snapshot.SnapshotManager;
import com.usatiuk.dhfs.objects.transaction.*;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.enterprise.inject.Instance;
import jakarta.inject.Inject;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;
// Manages all access to com.usatiuk.dhfs.objects.JData objects.
// In particular, it serves as a source of truth for what is committed to the backing storage.
// All data goes through it, it is responsible for transaction atomicity
// TODO: persistent tx id
@ApplicationScoped
public class JObjectManager {
private final List<PreCommitTxHook> _preCommitTxHooks;
private boolean _ready = false;
@Inject
SnapshotManager snapshotManager;
@Inject
TransactionFactory transactionFactory;
@Inject
LockManager lockManager;
private void verifyReady() {
if (!_ready) throw new IllegalStateException("Wrong service order!");
}
void init(@Observes @Priority(200) StartupEvent event) {
_ready = true;
}
JObjectManager(Instance<PreCommitTxHook> preCommitTxHooks) {
_preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList();
}
public TransactionPrivate createTransaction() {
verifyReady();
var tx = transactionFactory.createTransaction();
Log.tracev("Created transaction with snapshotId={0}", tx.snapshot().id());
return tx;
}
public TransactionHandle commit(TransactionPrivate tx) {
verifyReady();
var writes = new LinkedHashMap<JObjectKey, TxRecord.TxObjectRecord<?>>();
var dependenciesLocked = new LinkedHashMap<JObjectKey, Optional<JDataVersionedWrapper>>();
Map<JObjectKey, TransactionObject<?>> readSet;
var toUnlock = new ArrayList<AutoCloseableNoThrow>();
Consumer<JObjectKey> addDependency =
key -> {
dependenciesLocked.computeIfAbsent(key, k -> {
var lock = lockManager.lockObject(k);
toUnlock.add(lock);
return snapshotManager.readObjectDirect(k);
});
};
// For existing objects:
// Check that their version is not higher than the version of transaction being committed
// TODO: check deletions, inserts
try {
try {
Function<JObjectKey, JData> getPrev =
key -> switch (writes.get(key)) {
case TxRecord.TxObjectRecordWrite<?> write -> write.data();
case TxRecord.TxObjectRecordDeleted deleted -> null;
case null -> tx.getFromSource(JData.class, key).orElse(null);
default -> {
throw new TxCommitException("Unexpected value: " + writes.get(key));
}
};
boolean somethingChanged;
do {
somethingChanged = false;
Map<JObjectKey, TxRecord.TxObjectRecord<?>> currentIteration = new HashMap<>();
for (var hook : _preCommitTxHooks) {
for (var n : tx.drainNewWrites())
currentIteration.put(n.key(), n);
Log.trace("Commit iteration with " + currentIteration.size() + " records for hook " + hook.getClass());
for (var entry : currentIteration.entrySet()) {
somethingChanged = true;
Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.getKey());
var oldObj = getPrev.apply(entry.getKey());
switch (entry.getValue()) {
case TxRecord.TxObjectRecordWrite<?> write -> {
if (oldObj == null) {
hook.onCreate(write.key(), write.data());
} else {
hook.onChange(write.key(), oldObj, write.data());
}
}
case TxRecord.TxObjectRecordDeleted deleted -> {
hook.onDelete(deleted.key(), oldObj);
}
default -> throw new TxCommitException("Unexpected value: " + entry);
}
}
}
writes.putAll(currentIteration);
} while (somethingChanged);
if (writes.isEmpty()) {
Log.trace("Committing transaction - no changes");
return new TransactionHandle() {
@Override
public void onFlush(Runnable runnable) {
runnable.run();
}
};
}
} finally {
readSet = tx.reads();
Stream.concat(readSet.keySet().stream(), writes.keySet().stream())
.sorted(Comparator.comparing(JObjectKey::toString))
.forEach(addDependency);
for (var read : readSet.entrySet()) {
if (read.getValue() instanceof TransactionObjectLocked<?> locked) {
toUnlock.add(locked.lock());
}
}
}
Log.trace("Committing transaction start");
var snapshotId = tx.snapshot().id();
for (var read : readSet.entrySet()) {
var dep = dependenciesLocked.get(read.getKey());
if (dep.isEmpty() != read.getValue().data().isEmpty()) {
Log.trace("Checking read dependency " + read.getKey() + " - not found");
throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
}
if (dep.isEmpty()) {
// TODO: Every write gets a dependency due to hooks
continue;
// assert false;
// throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
}
if (dep.get().version() > snapshotId) {
Log.trace("Checking dependency " + read.getKey() + " - newer than");
throw new TxCommitException("Serialization hazard: " + dep.get().data().key() + " " + dep.get().version() + " vs " + snapshotId);
}
Log.trace("Checking dependency " + read.getKey() + " - ok with read");
}
var addFlushCallback = snapshotManager.commitTx(
writes.values().stream()
.filter(r -> {
if (r instanceof TxRecord.TxObjectRecordWrite<?>(JData data)) {
var dep = dependenciesLocked.get(data.key());
if (dep.isPresent() && dep.get().version() > snapshotId) {
Log.trace("Skipping write " + data.key() + " - dependency " + dep.get().version() + " vs " + snapshotId);
return false;
}
}
return true;
}).toList());
for (var callback : tx.getOnCommit()) {
callback.run();
}
for (var callback : tx.getOnFlush()) {
addFlushCallback.accept(callback);
}
return new TransactionHandle() {
@Override
public void onFlush(Runnable runnable) {
addFlushCallback.accept(runnable);
}
};
} catch (Throwable t) {
Log.trace("Error when committing transaction", t);
throw new TxCommitException(t.getMessage(), t);
} finally {
for (var unlock : toUnlock) {
unlock.close();
}
tx.close();
}
}
public void rollback(TransactionPrivate tx) {
verifyReady();
tx.reads().forEach((key, value) -> {
if (value instanceof TransactionObjectLocked<?> locked) {
locked.lock().close();
}
});
tx.close();
}
// private class TransactionObjectSourceImpl implements TransactionObjectSource {
// private final long _txId;
//
// private TransactionObjectSourceImpl(long txId) {
// _txId = txId;
// }
//
// @Override
// public <T extends JData> TransactionObject<T> get(Class<T> type, JObjectKey key) {
// var got = getObj(type, key);
// if (got.data().isPresent() && got.data().get().version() > _txId) {
// throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
// }
// return got;
// }
//
// @Override
// public <T extends JData> TransactionObject<T> getWriteLocked(Class<T> type, JObjectKey key) {
// var got = getObjLock(type, key);
// if (got.data().isPresent() && got.data().get().version() > _txId) {
// got.lock().close();
// throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
// }
// return got;
// }
// }
}
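Hooks drive the fixed-point loop above: each drainNewWrites() pass feeds new records back through every hook until nothing changes. A hedged sketch of a hook (LoggingHook is hypothetical; the method signatures follow the switch above, and the remaining interface methods such as getPriority are assumed to have defaults):

    @ApplicationScoped
    public class LoggingHook implements PreCommitTxHook {
        @Override
        public void onCreate(JObjectKey key, JData cur) {
            Log.tracev("created {0}", key);
        }

        @Override
        public void onChange(JObjectKey key, JData old, JData cur) {
            Log.tracev("changed {0}", key);
        }

        @Override
        public void onDelete(JObjectKey key, JData old) {
            Log.tracev("deleted {0}", key);
        }
    }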

View File

@@ -0,0 +1,26 @@
package com.usatiuk.dhfs.objects;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.utils.SerializationHelper;
import jakarta.enterprise.context.ApplicationScoped;
import java.nio.ByteBuffer;
@ApplicationScoped
public class JavaDataSerializer implements ObjectSerializer<JDataVersionedWrapper> {
@Override
public ByteString serialize(JDataVersionedWrapper obj) {
ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
buffer.putLong(obj.version());
buffer.flip();
return ByteString.copyFrom(buffer).concat(SerializationHelper.serialize(obj.data()));
}
@Override
public JDataVersionedWrapper deserialize(ByteString data) {
var version = data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
var rawData = data.substring(Long.BYTES);
return new JDataVersionedWrapperLazy(version, rawData);
}
}
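The framing is an 8-byte big-endian version followed by the Java-serialized payload, so deserialization can return the lazy wrapper without touching the payload. Round-trip sketch (obj is any JDataVersionedWrapper):

    ByteString blob = serializer.serialize(obj);
    JDataVersionedWrapper copy = serializer.deserialize(blob);
    assert copy.version() == obj.version(); // read from the prefix only
    // copy.data() deserializes the payload lazily on first access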

View File

@@ -1,5 +1,6 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;
import java.util.NoSuchElementException;
@@ -39,20 +40,20 @@ public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends Reversib
}
// switch (start) {
// case LT -> {
//// assert _next == null || _next.getKey().compareTo(startKey) < 0;
// }
// case LE -> {
//// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
// }
// case GT -> {
// assert _next == null || _next.compareTo(startKey) > 0;
// }
// case GE -> {
// assert _next == null || _next.compareTo(startKey) >= 0;
// }
// }
switch (start) {
case LT -> {
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
}
case LE -> {
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
}
case GT -> {
assert _next == null || _next.compareTo(startKey) > 0;
}
case GE -> {
assert _next == null || _next.compareTo(startKey) >= 0;
}
}
}
private void fillNext() {
@@ -113,6 +114,11 @@ public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends Reversib
return got;
}
@Override
protected Class<?> peekTypeImpl() {
return _goingForward ? _backing.peekNextType() : _backing.peekPrevType();
}
@Override
public void close() {
_backing.close();

View File

@@ -0,0 +1,14 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import com.usatiuk.dhfs.utils.DataLocker;
import jakarta.enterprise.context.ApplicationScoped;
@ApplicationScoped
public class LockManager {
private final DataLocker _objLocker = new DataLocker();
public AutoCloseableNoThrow lockObject(JObjectKey key) {
return _objLocker.lock(key);
}
}

View File

@@ -1,16 +1,19 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects;
import org.apache.commons.lang3.tuple.Pair;
import java.util.NoSuchElementException;
import java.util.function.Function;
public class MappingKvIterator<K extends Comparable<K>, V, V_T> implements CloseableKvIterator<K, V_T> {
private final CloseableKvIterator<K, V> _backing;
private final Function<V, V_T> _transformer;
private final Function<Class<?>, Class<?>> _classMapper;
public MappingKvIterator(CloseableKvIterator<K, V> backing, Function<V, V_T> transformer) {
public MappingKvIterator(CloseableKvIterator<K, V> backing, Function<V, V_T> transformer, Function<Class<?>, Class<?>> classMapper) {
_backing = backing;
_transformer = transformer;
_classMapper = classMapper;
}
@Override
@@ -18,6 +21,13 @@ public class MappingKvIterator<K extends Comparable<K>, V, V_T> implements Close
return _backing.peekNextKey();
}
@Override
public Class<?> peekNextType() {
if (!hasNext())
throw new NoSuchElementException();
return _classMapper.apply(_backing.peekNextType());
}
@Override
public void skip() {
_backing.skip();
@@ -38,6 +48,13 @@ public class MappingKvIterator<K extends Comparable<K>, V, V_T> implements Close
return _backing.peekPrevKey();
}
@Override
public Class<?> peekPrevType() {
if (!hasPrev())
throw new NoSuchElementException();
return _classMapper.apply(_backing.peekPrevType());
}
@Override
public Pair<K, V_T> prev() {
var got = _backing.prev();

View File

@@ -0,0 +1,7 @@
package com.usatiuk.dhfs.objects;
import java.util.Optional;
public interface MaybeTombstone<T> {
Optional<T> opt();
}

View File

@@ -0,0 +1,333 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;
import java.util.*;
import java.util.stream.Collectors;
public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final NavigableMap<K, CloseableKvIterator<K, V>> _sortedIterators = new TreeMap<>();
private final String _name;
private Map<CloseableKvIterator<K, V>, Integer> _iterators;
private final IteratorStart _initialStartType;
private final K _initialStartKey;
private interface FirstMatchState<K extends Comparable<K>, V> {
}
private record FirstMatchNone<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
}
private record FirstMatchFound<K extends Comparable<K>, V>(
CloseableKvIterator<K, V> iterator) implements FirstMatchState<K, V> {
}
private record FirstMatchConsumed<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
}
// Fast path for the first element
private FirstMatchState<K, V> _firstMatchState;
private final List<IterProdFn<K, V>> _pendingIterators;
public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
_goingForward = true;
_name = name;
_initialStartType = startType;
_initialStartKey = startKey;
{
int counter = 0;
var iteratorsTmp = new HashMap<CloseableKvIterator<K, V>, Integer>();
for (var iteratorFn : iterators) {
var iterator = iteratorFn.get(startType, startKey);
if ((counter == 0) // Not really a requirement but simplifies some things for now
&& (startType == IteratorStart.GE || startType == IteratorStart.LE)
&& iterator.hasNext()
&& iterator.peekNextKey().equals(startKey)) {
_firstMatchState = new FirstMatchFound<>(iterator);
_pendingIterators = iterators;
Log.tracev("{0} Created fast match: {1}", _name, _firstMatchState);
return;
}
iteratorsTmp.put(iterator, counter++);
}
_iterators = Map.copyOf(iteratorsTmp);
_pendingIterators = null;
}
_firstMatchState = new FirstMatchNone<>();
doInitialAdvance();
}
private void doInitialAdvance() {
if (_initialStartType == IteratorStart.LT || _initialStartType == IteratorStart.LE) {
// Starting at the greatest key less than (or equal to) the start key:
// each iterator has given us its "greatest LT/LE key",
// and we need to pick the greatest of those to start with.
// If some iterators have no lesser key, we pick the smallest of their keys instead.
var found = _iterators.keySet().stream()
.filter(CloseableKvIterator::hasNext)
.map((i) -> {
var peeked = i.peekNextKey();
// Log.warnv("peeked: {0}, from {1}", peeked, i.getClass());
return peeked;
}).distinct().collect(Collectors.partitioningBy(e -> _initialStartType == IteratorStart.LE ? e.compareTo(_initialStartKey) <= 0 : e.compareTo(_initialStartKey) < 0));
K initialMaxValue;
if (!found.get(true).isEmpty())
initialMaxValue = found.get(true).stream().max(Comparator.naturalOrder()).orElse(null);
else
initialMaxValue = found.get(false).stream().min(Comparator.naturalOrder()).orElse(null);
for (var iterator : _iterators.keySet()) {
while (iterator.hasNext() && iterator.peekNextKey().compareTo(initialMaxValue) < 0) {
iterator.skip();
}
}
}
for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
advanceIterator(iterator);
}
Log.tracev("{0} Initialized: {1}", _name, _sortedIterators);
switch (_initialStartType) {
// case LT -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
// }
// case LE -> {
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
// }
case GT -> {
assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(_initialStartKey) > 0;
}
case GE -> {
assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(_initialStartKey) >= 0;
}
}
}
private void doHydrate() {
if (_firstMatchState instanceof FirstMatchNone) {
return;
}
boolean consumed = _firstMatchState instanceof FirstMatchConsumed;
if (_firstMatchState instanceof FirstMatchFound(CloseableKvIterator iterator)) {
iterator.close();
}
_firstMatchState = new FirstMatchNone<>();
{
int counter = 0;
var iteratorsTmp = new HashMap<CloseableKvIterator<K, V>, Integer>();
for (var iteratorFn : _pendingIterators) {
var iterator = iteratorFn.get(consumed ? IteratorStart.GT : IteratorStart.GE, _initialStartKey);
iteratorsTmp.put(iterator, counter++);
}
_iterators = Map.copyOf(iteratorsTmp);
}
doInitialAdvance();
}
@SafeVarargs
public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
this(name, startType, startKey, List.of(iterators));
}
private void advanceIterator(CloseableKvIterator<K, V> iterator) {
if (!iterator.hasNext()) {
return;
}
K key = iterator.peekNextKey();
Log.tracev("{0} Advance peeked: {1}-{2}", _name, iterator, key);
if (!_sortedIterators.containsKey(key)) {
_sortedIterators.put(key, iterator);
return;
}
// Expects that reversed iterator returns itself when reversed again
var oursPrio = _iterators.get(_goingForward ? iterator : iterator.reversed());
var them = _sortedIterators.get(key);
var theirsPrio = _iterators.get(_goingForward ? them : them.reversed());
if (oursPrio < theirsPrio) {
_sortedIterators.put(key, iterator);
advanceIterator(them);
} else {
Log.tracev("{0} Skipped: {1}", _name, iterator.peekNextKey());
iterator.skip();
advanceIterator(iterator);
}
}
@Override
protected void reverse() {
switch (_firstMatchState) {
case FirstMatchFound<K, V> firstMatchFound -> {
doHydrate();
}
case FirstMatchConsumed<K, V> firstMatchConsumed -> {
doHydrate();
}
default -> {
}
}
var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
Log.tracev("{0} Reversing from {1}", _name, cur);
_goingForward = !_goingForward;
_sortedIterators.clear();
for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
// _goingForward inverted already
advanceIterator(!_goingForward ? iterator.reversed() : iterator);
}
if (_sortedIterators.isEmpty() || cur == null) {
return;
}
// Advance to the expected key, as we might have brought back some iterators
// that were at their ends
while (!_sortedIterators.isEmpty()
&& ((_goingForward && peekImpl().compareTo(cur.getKey()) <= 0)
|| (!_goingForward && peekImpl().compareTo(cur.getKey()) >= 0))) {
skipImpl();
}
Log.tracev("{0} Reversed to {1}", _name, _sortedIterators);
}
@Override
protected K peekImpl() {
switch (_firstMatchState) {
case FirstMatchFound<K, V> firstMatchFound -> {
return firstMatchFound.iterator.peekNextKey();
}
case FirstMatchConsumed<K, V> firstMatchConsumed -> {
doHydrate();
break;
}
default -> {
}
}
if (_sortedIterators.isEmpty())
throw new NoSuchElementException();
return _goingForward ? _sortedIterators.firstKey() : _sortedIterators.lastKey();
}
@Override
protected void skipImpl() {
switch (_firstMatchState) {
case FirstMatchFound<K, V> firstMatchFound -> {
var curVal = firstMatchFound.iterator.next();
firstMatchFound.iterator.close();
_firstMatchState = new FirstMatchConsumed<>();
// Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, firstMatchFound.iterator, curVal, _sortedIterators.keySet());
return;
}
case FirstMatchConsumed<K, V> firstMatchConsumed -> {
doHydrate();
break;
}
default -> {
}
}
var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
if (cur == null) {
throw new NoSuchElementException();
}
cur.getValue().skip();
advanceIterator(cur.getValue());
Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators);
}
@Override
protected boolean hasImpl() {
switch (_firstMatchState) {
case FirstMatchFound<K, V> firstMatchFound -> {
return true;
}
case FirstMatchConsumed<K, V> firstMatchConsumed -> {
doHydrate();
break;
}
default -> {
}
}
return !_sortedIterators.isEmpty();
}
@Override
protected Pair<K, V> nextImpl() {
switch (_firstMatchState) {
case FirstMatchFound<K, V> firstMatchFound -> {
var curVal = firstMatchFound.iterator.next();
firstMatchFound.iterator.close();
_firstMatchState = new FirstMatchConsumed<>();
// Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, firstMatchFound.iterator, curVal, _sortedIterators.keySet());
return curVal;
}
case FirstMatchConsumed<K, V> firstMatchConsumed -> {
doHydrate();
break;
}
default -> {
}
}
var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
if (cur == null) {
throw new NoSuchElementException();
}
var curVal = cur.getValue().next();
advanceIterator(cur.getValue());
// Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, cur.getValue(), curVal, _sortedIterators.keySet());
return curVal;
}
@Override
protected Class<?> peekTypeImpl() {
switch (_firstMatchState) {
case FirstMatchFound<K, V> firstMatchFound -> {
return firstMatchFound.iterator().peekNextType();
}
case FirstMatchConsumed<K, V> firstMatchConsumed -> {
doHydrate();
break;
}
default -> {
}
}
if (_sortedIterators.isEmpty())
throw new NoSuchElementException();
return _goingForward
? _sortedIterators.firstEntry().getValue().peekNextType()
: _sortedIterators.lastEntry().getValue().peekNextType();
}
@Override
public void close() {
if (_firstMatchState instanceof FirstMatchFound(CloseableKvIterator iterator)) {
iterator.close();
}
for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
iterator.close();
}
}
@Override
public String toString() {
return "MergingKvIterator{" +
"_name='" + _name + '\'' +
", _sortedIterators=" + _sortedIterators.keySet() +
", _iterators=" + _iterators +
'}';
}
}
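A hedged usage sketch: list order sets priority, so on a key collision the earlier source wins (here the first-match fast path also applies, since the first iterator starts exactly at "k1"):

    NavigableMap<String, Integer> primary = new TreeMap<>(Map.of("k1", 1, "k3", 3));
    NavigableMap<String, Integer> fallback = new TreeMap<>(Map.of("k1", 10, "k2", 2));
    try (var merged = new MergingKvIterator<String, Integer>(
            "demo", IteratorStart.GE, "k1",
            (start, key) -> new NavigableMapKvIterator<>(primary, start, key),
            (start, key) -> new NavigableMapKvIterator<>(fallback, start, key))) {
        while (merged.hasNext()) {
            var e = merged.next(); // ("k1", 1) from primary, then ("k2", 2), ("k3", 3)
        }
    }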

View File

@@ -1,5 +1,6 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;
import java.util.*;
@@ -9,22 +10,22 @@ public class NavigableMapKvIterator<K extends Comparable<K>, V> extends Reversib
private Iterator<Map.Entry<K, V>> _iterator;
private Map.Entry<K, V> _next;
public NavigableMapKvIterator(NavigableMap<K, ? extends V> map, IteratorStart start, K key) {
_map = (NavigableMap<K, V>) map;
public NavigableMapKvIterator(NavigableMap<K, V> map, IteratorStart start, K key) {
_map = map;
SortedMap<K, V> _view;
_goingForward = true;
switch (start) {
case GE -> _view = _map.tailMap(key, true);
case GT -> _view = _map.tailMap(key, false);
case GE -> _view = map.tailMap(key, true);
case GT -> _view = map.tailMap(key, false);
case LE -> {
var floorKey = _map.floorKey(key);
var floorKey = map.floorKey(key);
if (floorKey == null) _view = _map;
else _view = _map.tailMap(floorKey, true);
else _view = map.tailMap(floorKey, true);
}
case LT -> {
var lowerKey = map.lowerKey(key);
if (lowerKey == null) _view = _map;
else _view = _map.tailMap(lowerKey, true);
else _view = map.tailMap(lowerKey, true);
}
default -> throw new IllegalArgumentException("Unknown start type");
}
@@ -90,6 +91,13 @@ public class NavigableMapKvIterator<K extends Comparable<K>, V> extends Reversib
return Pair.of(ret);
}
@Override
protected Class<? extends V> peekTypeImpl() {
if (_next == null)
throw new NoSuchElementException("No more elements");
return (Class<? extends V>) _next.getValue().getClass();
}
@Override
public void close() {
}

View File

@@ -1,11 +1,9 @@
package com.usatiuk.objects;
package com.usatiuk.dhfs.objects;
import com.google.protobuf.ByteString;
import java.nio.ByteBuffer;
public interface ObjectSerializer<T> {
ByteString serialize(T obj);
T deserialize(ByteBuffer data);
T deserialize(ByteString data);
}

View File

@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.objects;
public record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry {
}

View File

@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.objects;
public record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry {
}

View File

@@ -0,0 +1,5 @@
package com.usatiuk.dhfs.objects;
public interface PendingWriteEntry {
long bundleId();
}

View File

@@ -1,7 +1,4 @@
package com.usatiuk.objects.transaction;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
package com.usatiuk.dhfs.objects;
public interface PreCommitTxHook {
default void onChange(JObjectKey key, JData old, JData cur) {

View File

@@ -1,27 +1,29 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Function;
public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final MergingKvIterator<K, MaybeTombstone<V>> _backing;
private Pair<K, V> _next = null;
public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
private final CloseableKvIterator<K, V> _backing;
private final Function<V, V_T> _transformer;
private Pair<K, V_T> _next = null;
private boolean _checkedNext = false;
public TombstoneSkippingIterator(IteratorStart start, K startKey, List<CloseableKvIterator<K, MaybeTombstone<V>>> iterators) {
public PredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<V, V_T> transformer) {
_goingForward = true;
_backing = new MergingKvIterator<>(start, startKey, iterators);
_backing = backing;
_transformer = transformer;
if (start == IteratorStart.GE || start == IteratorStart.GT)
return;
boolean shouldGoBack = false;
if (canHaveNext())
tryFillNext();
fillNext();
boolean shouldGoBack = false;
if (start == IteratorStart.LE) {
if (_next == null || _next.getKey().compareTo(startKey) > 0) {
shouldGoBack = true;
@@ -38,27 +40,34 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
_backing.skipPrev();
fillNext();
_goingForward = true;
if (_next != null)
_backing.skip();
_backing.skip();
fillNext();
}
switch (start) {
case LT -> {
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
}
case LE -> {
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
}
case GT -> {
assert _next == null || _next.getKey().compareTo(startKey) > 0;
}
case GE -> {
assert _next == null || _next.getKey().compareTo(startKey) >= 0;
}
}
}
private boolean canHaveNext() {
return (_goingForward ? _backing.hasNext() : _backing.hasPrev());
}
private boolean tryFillNext() {
var next = _goingForward ? _backing.next() : _backing.prev();
if (next.getValue() instanceof Tombstone<?>)
return false;
_next = Pair.of(next.getKey(), ((Data<V>) next.getValue()).value());
return true;
}
private void fillNext() {
while (_next == null && canHaveNext()) {
tryFillNext();
while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) {
var next = _goingForward ? _backing.next() : _backing.prev();
var transformed = _transformer.apply(next.getValue());
if (transformed == null)
continue;
_next = Pair.of(next.getKey(), transformed);
}
_checkedNext = true;
}
@@ -73,6 +82,9 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
else if (!_goingForward && !wasAtEnd)
_backing.skipPrev();
if (!wasAtEnd)
Log.tracev("Skipped in reverse: {0}", _next);
_next = null;
_checkedNext = false;
}
@@ -107,7 +119,7 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
}
@Override
protected Pair<K, V> nextImpl() {
protected Pair<K, V_T> nextImpl() {
if (!_checkedNext)
fillNext();
@@ -119,6 +131,17 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
return ret;
}
@Override
protected Class<?> peekTypeImpl() {
if (!_checkedNext)
fillNext();
if (_next == null)
throw new NoSuchElementException("No more elements");
return _next.getValue().getClass();
}
@Override
public void close() {
_backing.close();
@@ -127,6 +150,7 @@ public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends Rever
@Override
public String toString() {
return "PredicateKvIterator{" +
"_backing=" + _backing +
", _next=" + _next +
'}';
}

View File

@@ -1,8 +1,8 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects;
import org.apache.commons.lang3.tuple.Pair;
public class ReversedKvIterator<K extends Comparable<? super K>, V> implements CloseableKvIterator<K, V> {
public class ReversedKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
private final CloseableKvIterator<K, V> _backing;
public ReversedKvIterator(CloseableKvIterator<K, V> backing) {
@@ -29,6 +29,11 @@ public class ReversedKvIterator<K extends Comparable<? super K>, V> implements C
return _backing.peekPrevKey();
}
@Override
public Class<?> peekNextType() {
return _backing.peekPrevType();
}
@Override
public void skip() {
_backing.skipPrev();
@@ -39,6 +44,11 @@ public class ReversedKvIterator<K extends Comparable<? super K>, V> implements C
return _backing.peekNextKey();
}
@Override
public Class<?> peekPrevType() {
return _backing.peekNextType();
}
@Override
public Pair<K, V> prev() {
return _backing.next();

View File

@@ -1,4 +1,4 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects;
import org.apache.commons.lang3.tuple.Pair;
@@ -27,6 +27,8 @@ public abstract class ReversibleKvIterator<K extends Comparable<K>, V> implement
abstract protected Pair<K, V> nextImpl();
abstract protected Class<?> peekTypeImpl();
@Override
public K peekNextKey() {
ensureForward();
@@ -76,4 +78,15 @@ public abstract class ReversibleKvIterator<K extends Comparable<K>, V> implement
skipImpl();
}
@Override
public Class<?> peekNextType() {
ensureForward();
return peekTypeImpl();
}
@Override
public Class<?> peekPrevType() {
ensureBackward();
return peekTypeImpl();
}
}

View File

@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects;
import java.util.Optional;
public record Tombstone<V>() implements MaybeTombstone<V> {
@Override
public Optional<V> opt() {
return Optional.empty();
}
}
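Data and Tombstone are the two MaybeTombstone cases that merged views distinguish: a Tombstone shadows older values for its key so the merged iterator can drop the entry entirely. A quick sketch:

    MaybeTombstone<String> live = new Data<>("hello");
    MaybeTombstone<String> deleted = new Tombstone<>();
    assert live.opt().isPresent();  // Data carries the value
    assert deleted.opt().isEmpty(); // Tombstone marks a deletion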

View File

@@ -0,0 +1,95 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
public class TombstoneMergingKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
private final CloseableKvIterator<K, V> _backing;
private final String _name;
private final Class<?> _returnType;
public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, MaybeTombstone<V>>> iterators,
Class<?> returnType) {
_name = name;
_returnType = returnType;
_backing = new MappingKvIterator<>(new TypePredicateKvIterator<>(
new MergingKvIterator<>(name + "-merging", startType, startKey, iterators),
startType, startKey,
k -> {
assert !k.equals(MaybeTombstone.class);
assert Tombstone.class.isAssignableFrom(k) || Data.class.isAssignableFrom(k);
return Data.class.isAssignableFrom(k);
}), t -> (V) returnType.cast(Data.class.cast(t).value()), (t) -> returnType);
}
@SafeVarargs
public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, Class<?> returnType, IterProdFn<K, MaybeTombstone<V>>... iterators) {
this(name, startType, startKey, List.of(iterators), returnType);
}
@Override
public K peekNextKey() {
return _backing.peekNextKey();
}
@Override
public Class<?> peekNextType() {
return _returnType;
}
@Override
public void skip() {
_backing.skip();
}
@Override
public K peekPrevKey() {
return _backing.peekPrevKey();
}
@Override
public Class<?> peekPrevType() {
return _returnType;
}
@Override
public Pair<K, V> prev() {
return _backing.prev();
}
@Override
public boolean hasPrev() {
return _backing.hasPrev();
}
@Override
public void skipPrev() {
_backing.skipPrev();
}
@Override
public void close() {
_backing.close();
}
@Override
public boolean hasNext() {
return _backing.hasNext();
}
@Override
public Pair<K, V> next() {
return _backing.next();
}
@Override
public String toString() {
return "TombstoneMergingKvIterator{" +
"_backing=" + _backing +
", _name='" + _name + '\'' +
'}';
}
}

View File

@@ -0,0 +1,102 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import com.usatiuk.dhfs.objects.transaction.TransactionHandle;
import com.usatiuk.dhfs.utils.VoidFn;
import io.quarkus.logging.Log;
import java.util.function.Supplier;
public interface TransactionManager {
void begin();
TransactionHandle commit();
void rollback();
default <T> T runTries(Supplier<T> supplier, int tries) {
if (current() != null) {
return supplier.get();
}
begin();
T ret;
try {
ret = supplier.get();
} catch (TxCommitException txCommitException) {
rollback();
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
return runTries(supplier, tries - 1);
} catch (Throwable e) {
rollback();
throw e;
}
try {
commit();
return ret;
} catch (TxCommitException txCommitException) {
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
return runTries(supplier, tries - 1);
}
}
default TransactionHandle runTries(VoidFn fn, int tries) {
if (current() != null) {
fn.apply();
return new TransactionHandle() {
@Override
public void onFlush(Runnable runnable) {
current().onCommit(runnable);
}
};
}
begin();
try {
fn.apply();
} catch (TxCommitException txCommitException) {
rollback();
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
return runTries(fn, tries - 1);
} catch (Throwable e) {
rollback();
throw e;
}
try {
return commit();
} catch (TxCommitException txCommitException) {
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
return runTries(fn, tries - 1);
}
}
default TransactionHandle run(VoidFn fn) {
return runTries(fn, 10);
}
default <T> T run(Supplier<T> supplier) {
return runTries(supplier, 10);
}
default void executeTx(VoidFn fn) {
run(fn);
}
default <T> T executeTx(Supplier<T> supplier) {
return run(supplier);
}
Transaction current();
}
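Call sites typically wrap work in run(...), which starts a transaction if none is active, retries up to 10 times on TxCommitException, and reuses the surrounding transaction when nested. A hedged sketch:

    @Inject
    TransactionManager txm;

    void touch(JObjectKey key) {
        txm.run(() -> {
            // current() is non-null here; nested run() calls reuse this transaction
            var tx = txm.current();
            // ... read and put objects through the Transaction API ...
        });
    }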

View File

@@ -0,0 +1,67 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import com.usatiuk.dhfs.objects.transaction.TransactionHandle;
import com.usatiuk.dhfs.objects.transaction.TransactionPrivate;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
@ApplicationScoped
public class TransactionManagerImpl implements TransactionManager {
private static final ThreadLocal<TransactionPrivate> _currentTransaction = new ThreadLocal<>();
@Inject
JObjectManager jObjectManager;
@Override
public void begin() {
if (_currentTransaction.get() != null) {
throw new IllegalStateException("Transaction already started");
}
Log.trace("Starting transaction");
var tx = jObjectManager.createTransaction();
_currentTransaction.set(tx);
}
@Override
public TransactionHandle commit() {
if (_currentTransaction.get() == null) {
throw new IllegalStateException("No transaction started");
}
Log.trace("Committing transaction");
try {
return jObjectManager.commit(_currentTransaction.get());
} catch (Throwable e) {
Log.trace("Transaction commit failed", e);
throw e;
} finally {
_currentTransaction.get().close();
_currentTransaction.remove();
}
}
@Override
public void rollback() {
if (_currentTransaction.get() == null) {
throw new IllegalStateException("No transaction started");
}
try {
jObjectManager.rollback(_currentTransaction.get());
} catch (Throwable e) {
Log.error("Transaction rollback failed", e);
throw e;
} finally {
_currentTransaction.get().close();
_currentTransaction.remove();
}
}
@Override
public Transaction current() {
return _currentTransaction.get();
}
}

View File

@@ -0,0 +1,11 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.transaction.TransactionObject;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import java.util.Optional;
public record TransactionObjectLocked<T extends JData>
(Optional<JDataVersionedWrapper> data, AutoCloseableNoThrow lock)
implements TransactionObject<T> {
}

View File

@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.transaction.TransactionObject;
import java.util.Optional;
public record TransactionObjectNoLock<T extends JData>
(Optional<JDataVersionedWrapper> data)
implements TransactionObject<T> {
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.objects.transaction;
package com.usatiuk.dhfs.objects;
public class TxCommitException extends RuntimeException {
public TxCommitException(String message) {

View File

@@ -0,0 +1,141 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;
import java.util.NoSuchElementException;
import java.util.function.Function;
public class TypePredicateKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
private final CloseableKvIterator<K, V> _backing;
private final Function<Class<?>, Boolean> _filter;
private K _next;
public TypePredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<Class<?>, Boolean> filter) {
_goingForward = true;
_backing = backing;
_filter = filter;
fillNext();
boolean shouldGoBack = false;
if (start == IteratorStart.LE) {
if (_next == null || _next.compareTo(startKey) > 0) {
shouldGoBack = true;
}
} else if (start == IteratorStart.LT) {
if (_next == null || _next.compareTo(startKey) >= 0) {
shouldGoBack = true;
}
}
if (shouldGoBack && _backing.hasPrev()) {
_goingForward = false;
_next = null;
fillNext();
if (_next != null)
_backing.skipPrev();
_goingForward = true;
// _backing.skip();
fillNext();
}
switch (start) {
case LT -> {
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
}
case LE -> {
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
}
case GT -> {
assert _next == null || _next.compareTo(startKey) > 0;
}
case GE -> {
assert _next == null || _next.compareTo(startKey) >= 0;
}
}
}
private void fillNext() {
while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) {
var next = _goingForward ? _backing.peekNextType() : _backing.peekPrevType();
if (!_filter.apply(next)) {
if (_goingForward)
_backing.skip();
else
_backing.skipPrev();
continue;
} else {
_next = _goingForward ? _backing.peekNextKey() : _backing.peekPrevKey();
}
}
}
@Override
protected void reverse() {
_goingForward = !_goingForward;
_next = null;
fillNext();
}
@Override
protected K peekImpl() {
if (_next == null)
throw new NoSuchElementException();
return _next;
}
@Override
protected void skipImpl() {
if (_next == null)
throw new NoSuchElementException();
_next = null;
if (_goingForward)
_backing.skip();
else
_backing.skipPrev();
fillNext();
}
@Override
protected boolean hasImpl() {
return _next != null;
}
@Override
protected Pair<K, V> nextImpl() {
if (_next == null)
throw new NoSuchElementException("No more elements");
var retKey = _next;
_next = null;
var nextType = _goingForward ? _backing.peekNextType() : _backing.peekPrevType();
var got = _goingForward ? _backing.next() : _backing.prev();
assert got.getKey().equals(retKey);
assert nextType.equals(got.getValue().getClass());
assert _filter.apply(got.getValue().getClass());
fillNext();
return got;
}
@Override
protected Class<?> peekTypeImpl() {
if (_next == null)
throw new NoSuchElementException("No more elements");
return _goingForward ? _backing.peekNextType() : _backing.peekPrevType();
}
@Override
public void close() {
_backing.close();
}
@Override
public String toString() {
return "KeyPredicateKvIterator{" +
"_backing=" + _backing +
", _next=" + _next +
'}';
}
}
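A hedged sketch of the intended use (mirroring what TombstoneMergingKvIterator above does internally): filter a MaybeTombstone view down to live entries by type alone, without materializing values:

    NavigableMap<String, MaybeTombstone<String>> map = new TreeMap<>();
    map.put("a", new Data<>("live"));
    map.put("b", new Tombstone<>());
    var backing = new NavigableMapKvIterator<>(map, IteratorStart.GE, "a");
    try (var liveOnly = new TypePredicateKvIterator<>(backing, IteratorStart.GE, "a",
            k -> Data.class.isAssignableFrom(k))) {
        while (liveOnly.hasNext()) {
            var e = liveOnly.next(); // only ("a", Data["live"]); "b" is skipped by type
        }
    }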

View File

@@ -0,0 +1,499 @@
package com.usatiuk.dhfs.objects;
import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.persistence.TxManifestObj;
import com.usatiuk.dhfs.objects.transaction.TxRecord;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.pcollections.PSortedMap;
import org.pcollections.TreePMap;
import javax.annotation.Nonnull;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
@ApplicationScoped
public class WritebackObjectPersistentStore {
private final LinkedList<TxBundle> _pendingBundles = new LinkedList<>();
private final AtomicReference<PSortedMap<JObjectKey, PendingWriteEntry>> _pendingWrites = new AtomicReference<>(TreePMap.empty());
private final ReentrantReadWriteLock _pendingWritesVersionLock = new ReentrantReadWriteLock();
private final LinkedHashMap<Long, TxBundle> _notFlushedBundles = new LinkedHashMap<>();
private final Object _flushWaitSynchronizer = new Object();
private final AtomicLong _lastWrittenTx = new AtomicLong(-1);
private final AtomicLong _counter = new AtomicLong();
private final AtomicLong _lastCommittedTx = new AtomicLong(-1);
private final AtomicLong _waitedTotal = new AtomicLong(0);
@Inject
CachingObjectPersistentStore cachedStore;
@ConfigProperty(name = "dhfs.objects.writeback.limit")
long sizeLimit;
private long currentSize = 0;
private ExecutorService _writebackExecutor;
private ExecutorService _statusExecutor;
private volatile boolean _ready = false;
void init(@Observes @Priority(110) StartupEvent event) {
{
BasicThreadFactory factory = new BasicThreadFactory.Builder()
.namingPattern("tx-writeback-%d")
.build();
_writebackExecutor = Executors.newSingleThreadExecutor(factory);
_writebackExecutor.submit(this::writeback);
}
_statusExecutor = Executors.newSingleThreadExecutor();
_statusExecutor.submit(() -> {
try {
while (true) {
Thread.sleep(1000);
if (currentSize > 0)
Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB");
}
} catch (InterruptedException ignored) {
}
});
_counter.set(cachedStore.getLastTxId());
_lastCommittedTx.set(cachedStore.getLastTxId());
_ready = true;
}
void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException {
Log.info("Waiting for all transactions to drain");
synchronized (_flushWaitSynchronizer) {
_ready = false;
while (currentSize > 0) {
_flushWaitSynchronizer.wait();
}
}
_writebackExecutor.shutdownNow();
Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms");
}
private void verifyReady() {
if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!");
}
private void writeback() {
while (!Thread.interrupted()) {
try {
TxBundle bundle = new TxBundle(0);
synchronized (_pendingBundles) {
while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready)
_pendingBundles.wait();
long diff = 0;
while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var toCompress = _pendingBundles.poll();
diff -= toCompress.calculateTotalSize();
bundle.compress(toCompress);
}
diff += bundle.calculateTotalSize();
synchronized (_flushWaitSynchronizer) {
currentSize += diff;
}
}
var toWrite = new ArrayList<Pair<JObjectKey, JDataVersionedWrapper>>();
var toDelete = new ArrayList<JObjectKey>();
for (var e : bundle._entries.values()) {
switch (e) {
case TxBundle.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> {
Log.trace("Writing new " + key);
toWrite.add(Pair.of(key, data));
}
case TxBundle.DeletedEntry(JObjectKey key) -> {
Log.trace("Deleting from persistent storage " + key);
toDelete.add(key);
}
default -> throw new IllegalStateException("Unexpected value: " + e);
}
}
cachedStore.commitTx(
new TxManifestObj<>(
Collections.unmodifiableList(toWrite),
Collections.unmodifiableList(toDelete)
), bundle.getId());
Log.trace("Bundle " + bundle.getId() + " committed");
// Remove from pending writes, after the real commit
// As we are the only writers to _pendingWrites, there is no need to synchronize with
// iterator creation: if an iterator gets the older version, it will still contain all the new changes
synchronized (_pendingBundles) {
var curPw = _pendingWrites.get();
for (var e : bundle._entries.values()) {
var cur = curPw.get(e.key());
if (cur.bundleId() <= bundle.getId())
curPw = curPw.minus(e.key());
}
_pendingWrites.set(curPw);
// No need to increment version
}
List<List<Runnable>> callbacks = new ArrayList<>();
synchronized (_notFlushedBundles) {
_lastWrittenTx.set(bundle.getId());
while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) {
callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted());
}
}
callbacks.forEach(l -> l.forEach(Runnable::run));
synchronized (_flushWaitSynchronizer) {
currentSize -= bundle.calculateTotalSize();
// FIXME:
if (currentSize <= sizeLimit || !_ready)
_flushWaitSynchronizer.notifyAll();
}
} catch (InterruptedException ignored) {
} catch (Exception e) {
Log.error("Uncaught exception in writeback", e);
} catch (Throwable o) {
Log.error("Uncaught THROWABLE in writeback", o);
}
}
Log.info("Writeback thread exiting");
}
public TxBundle createBundle() {
verifyReady();
boolean wait = false;
while (true) {
if (wait) {
synchronized (_flushWaitSynchronizer) {
long started = System.currentTimeMillis();
while (currentSize > sizeLimit) {
try {
_flushWaitSynchronizer.wait();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
long waited = System.currentTimeMillis() - started;
_waitedTotal.addAndGet(waited);
if (Log.isTraceEnabled())
Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms");
wait = false;
}
}
synchronized (_pendingBundles) {
synchronized (_flushWaitSynchronizer) {
if (currentSize > sizeLimit) {
if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var target = _pendingBundles.poll();
long diff = -target.calculateTotalSize();
while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var toCompress = _pendingBundles.poll();
diff -= toCompress.calculateTotalSize();
target.compress(toCompress);
}
diff += target.calculateTotalSize();
currentSize += diff;
_pendingBundles.addFirst(target);
}
}
if (currentSize > sizeLimit) {
wait = true;
continue;
}
}
synchronized (_notFlushedBundles) {
var bundle = new TxBundle(_counter.incrementAndGet());
_pendingBundles.addLast(bundle);
_notFlushedBundles.put(bundle.getId(), bundle);
return bundle;
}
}
}
}
public void commitBundle(TxBundle bundle) {
verifyReady();
_pendingWritesVersionLock.writeLock().lock();
try {
synchronized (_pendingBundles) {
var curPw = _pendingWrites.get();
for (var e : ((TxBundle) bundle)._entries.values()) {
switch (e) {
case TxBundle.CommittedEntry c -> {
curPw = curPw.plus(c.key(), new PendingWrite(c.data, bundle.getId()));
}
case TxBundle.DeletedEntry d -> {
curPw = curPw.plus(d.key(), new PendingDelete(d.key, bundle.getId()));
}
default -> throw new IllegalStateException("Unexpected value: " + e);
}
}
// Now, make the changes visible to new iterators
_pendingWrites.set(curPw);
((TxBundle) bundle).setReady();
if (_pendingBundles.peek() == bundle)
_pendingBundles.notify();
synchronized (_flushWaitSynchronizer) {
currentSize += ((TxBundle) bundle).calculateTotalSize();
}
}
assert bundle.getId() > _lastCommittedTx.get();
_lastCommittedTx.set(bundle.getId());
} finally {
_pendingWritesVersionLock.writeLock().unlock();
}
}
public void dropBundle(TxBundle bundle) {
verifyReady();
synchronized (_pendingBundles) {
Log.warn("Dropped bundle: " + bundle);
_pendingBundles.remove((TxBundle) bundle);
synchronized (_flushWaitSynchronizer) {
currentSize -= ((TxBundle) bundle).calculateTotalSize();
}
}
}
public void fence(long bundleId) {
var latch = new CountDownLatch(1);
asyncFence(bundleId, latch::countDown);
try {
latch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
public void asyncFence(long bundleId, Runnable fn) {
verifyReady();
if (bundleId < 0) throw new IllegalArgumentException("bundleId must be >= 0!");
if (_lastWrittenTx.get() >= bundleId) {
fn.run();
return;
}
synchronized (_notFlushedBundles) {
if (_lastWrittenTx.get() >= bundleId) {
fn.run();
return;
}
_notFlushedBundles.get(bundleId).addCallback(fn);
}
}
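// Informal usage sketch (hedged): fence(id) blocks the caller until bundle
// `id` has been durably written; asyncFence(id, fn) registers `fn` instead,
// running it immediately if the bundle was already flushed, e.g.:
//
//   store.asyncFence(bundleId, () -> Log.info("bundle " + bundleId + " is on disk"));
//
// (`store` and `bundleId` are hypothetical names here)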
private static class TxBundle {
private final LinkedHashMap<JObjectKey, BundleEntry> _entries = new LinkedHashMap<>();
private final ArrayList<Runnable> _callbacks = new ArrayList<>();
private long _txId;
private volatile boolean _ready = false;
private long _size = -1;
private boolean _wasCommitted = false;
private TxBundle(long txId) {
_txId = txId;
}
public long getId() {
return _txId;
}
public void setReady() {
_ready = true;
}
public void addCallback(Runnable callback) {
synchronized (_callbacks) {
if (_wasCommitted) throw new IllegalStateException();
_callbacks.add(callback);
}
}
public List<Runnable> setCommitted() {
synchronized (_callbacks) {
_wasCommitted = true;
return Collections.unmodifiableList(_callbacks);
}
}
public void commit(JDataVersionedWrapper obj) {
synchronized (_entries) {
_entries.put(obj.data().key(), new CommittedEntry(obj.data().key(), obj, obj.data().estimateSize()));
}
}
public void delete(JObjectKey obj) {
synchronized (_entries) {
_entries.put(obj, new DeletedEntry(obj));
}
}
public long calculateTotalSize() {
if (_size >= 0) return _size;
_size = _entries.values().stream().mapToInt(BundleEntry::size).sum();
return _size;
}
public void compress(TxBundle other) {
if (_txId >= other._txId)
throw new IllegalArgumentException("Compressing an older bundle into newer");
_txId = other._txId;
_size = -1;
_entries.putAll(other._entries);
}
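// Informal example of the merge semantics above: if bundle #5 holds
// {A=v1, B=v2} and a ready bundle #6 holds {B=v3, delete C}, compressing
// #6 into #5 yields a single bundle with id 6 and entries
// {A=v1, B=v3, delete C}; only the newest entry per key survives.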
private interface BundleEntry {
JObjectKey key();
int size();
}
private record CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size)
implements BundleEntry {
}
private record DeletedEntry(JObjectKey key)
implements BundleEntry {
public int size() {
return 64;
}
}
}
public Optional<PendingWriteEntry> getPendingWrite(JObjectKey key) {
synchronized (_pendingBundles) {
return Optional.ofNullable(_pendingWrites.get().get(key));
}
}
@Nonnull
public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
var pending = getPendingWrite(name).orElse(null);
return switch (pending) {
case PendingWrite write -> Optional.of(write.data());
case PendingDelete ignored -> Optional.empty();
case null -> cachedStore.readObject(name);
default -> throw new IllegalStateException("Unexpected value: " + pending);
};
}
public interface VerboseReadResult {
}
public record VerboseReadResultPersisted(Optional<JDataVersionedWrapper> data) implements VerboseReadResult {
}
public record VerboseReadResultPending(PendingWriteEntry pending) implements VerboseReadResult {
}
@Nonnull
public VerboseReadResult readObjectVerbose(JObjectKey key) {
var pending = getPendingWrite(key).orElse(null);
if (pending != null) {
return new VerboseReadResultPending(pending);
}
return new VerboseReadResultPersisted(cachedStore.readObject(key));
}
/**
* @param commitLocked a function that will be called with the new transaction id and a runnable;
*                     running that runnable commits the transaction, and the changes in the store
*                     become visible to new transactions only after it has been called
*/
public Consumer<Runnable> commitTx(Collection<TxRecord.TxObjectRecord<?>> writes, BiConsumer<Long, Runnable> commitLocked) {
var bundle = createBundle();
long bundleId = bundle.getId();
try {
for (var action : writes) {
switch (action) {
case TxRecord.TxObjectRecordWrite<?> write -> {
Log.trace("Flushing object " + write.key());
bundle.commit(new JDataVersionedWrapperImpl(write.data(), bundleId));
}
case TxRecord.TxObjectRecordDeleted deleted -> {
Log.trace("Deleting object " + deleted.key());
bundle.delete(deleted.key());
}
default -> {
throw new TxCommitException("Unexpected value: " + action.key());
}
}
}
} catch (Throwable t) {
dropBundle(bundle);
throw new TxCommitException(t.getMessage(), t);
}
Log.tracef("Committing transaction %d to storage", bundleId);
commitLocked.accept(bundleId, () -> {
commitBundle(bundle);
});
return r -> asyncFence(bundleId, r);
}
// Returns an iterator with a view of all committed objects
// Does not have to guarantee a consistent view; snapshots are handled by upper layers
// Invalidated by commitBundle, but might return data after it has really been committed
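// Example of the merge (an informal illustration, not from the source): with pending
// {A -> write(v2), B -> delete} and the cached store holding {A -> v1, B -> v0, C -> v3},
// the returned iterator yields A=v2 and C=v3; B is hidden by its tombstone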
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
Log.tracev("Getting writeback iterator: {0}, {1}", start, key);
_pendingWritesVersionLock.readLock().lock();
try {
var curPending = _pendingWrites.get();
return new TombstoneMergingKvIterator<>("writeback-ps", start, key, JDataVersionedWrapper.class,
(tS, tK) -> new MappingKvIterator<>(
new NavigableMapKvIterator<>(curPending, tS, tK),
e -> switch (e) {
case PendingWrite pw -> new Data<>(pw.data());
case PendingDelete d -> new Tombstone<>();
default -> throw new IllegalStateException("Unexpected value: " + e);
},
e -> {
if (PendingWrite.class.isAssignableFrom(e)) {
return Data.class;
} else if (PendingDelete.class.isAssignableFrom(e)) {
return Tombstone.class;
} else {
throw new IllegalStateException("Unexpected type: " + e);
}
}),
(tS, tK) -> cachedStore.getIterator(tS, tK));
} finally {
_pendingWritesVersionLock.readLock().unlock();
}
}
public long getLastTxId() {
_pendingWritesVersionLock.readLock().lock();
try {
return _lastCommittedTx.get();
} finally {
_pendingWritesVersionLock.readLock().unlock();
}
}
}
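
A minimal sketch of the commit flow through the store above (writebackStore and txWrites are hypothetical names): commitTx() stages the transaction's writes and deletes into a bundle, the commitLocked callback publishes it, and the returned consumer hooks a durability callback.

var onFlush = writebackStore.commitTx(txWrites, (txId, commit) -> {
    // hold whatever upper-layer lock makes the new state visible atomically,
    // then publish: after commit.run() new iterators see the changes
    commit.run();
});
// runs once the bundle has actually been written to persistent storage
onFlush.accept(() -> Log.info("transaction flushed"));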

View File

@@ -0,0 +1,264 @@
package com.usatiuk.dhfs.objects.persistence;
import com.usatiuk.dhfs.objects.*;
import com.usatiuk.dhfs.utils.DataLocker;
import io.quarkus.logging.Log;
import io.quarkus.runtime.Startup;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.pcollections.TreePMap;
import javax.annotation.Nonnull;
import java.util.LinkedHashMap;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@ApplicationScoped
public class CachingObjectPersistentStore {
private final LinkedHashMap<JObjectKey, CacheEntry> _cache = new LinkedHashMap<>();
private TreePMap<JObjectKey, CacheEntry> _sortedCache = TreePMap.empty();
private long _cacheVersion = 0;
private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();
private final DataLocker _readerLocker = new DataLocker();
@Inject
SerializingObjectPersistentStore delegate;
@ConfigProperty(name = "dhfs.objects.lru.limit")
long sizeLimit;
@ConfigProperty(name = "dhfs.objects.lru.print-stats")
boolean printStats;
private long _curSize = 0;
private long _evict = 0;
private ExecutorService _statusExecutor = null;
@Startup
void init() {
if (printStats) {
_statusExecutor = Executors.newSingleThreadExecutor();
_statusExecutor.submit(() -> {
try {
while (true) {
Thread.sleep(10000);
if (_curSize > 0)
Log.info("Cache status: size=" + _curSize / 1024 / 1024 + "MB" + " evicted=" + _evict);
_evict = 0;
}
} catch (InterruptedException ignored) {
}
});
}
}
private void put(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
// Log.tracev("Adding {0} to cache: {1}", key, obj);
_lock.writeLock().lock();
try {
int size = obj.map(JDataVersionedWrapper::estimateSize).orElse(16);
_curSize += size;
CacheEntry entry = obj.<CacheEntry>map(v -> new CacheEntryYes(v, size)).orElse(new CacheEntryDeleted());
var old = _cache.putLast(key, entry);
_sortedCache = _sortedCache.plus(key, entry);
if (old != null)
_curSize -= old.size();
while (_curSize >= sizeLimit) {
var del = _cache.pollFirstEntry();
_sortedCache = _sortedCache.minus(del.getKey());
_curSize -= del.getValue().size();
_evict++;
}
} finally {
_lock.writeLock().unlock();
}
}
@Nonnull
public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
_lock.readLock().lock();
try {
var got = _cache.get(name);
if (got != null) {
return switch (got) {
case CacheEntryYes yes -> Optional.of(yes.object());
case CacheEntryDeleted del -> Optional.empty();
default -> throw new IllegalStateException("Unexpected value: " + got);
};
}
} finally {
_lock.readLock().unlock();
}
try (var lock = _readerLocker.lock(name)) {
// TODO: This is possibly racy
// var got = delegate.readObject(name);
// put(name, got);
return delegate.readObject(name);
}
}
public void commitTx(TxManifestObj<? extends JDataVersionedWrapper> names, long txId) {
var serialized = delegate.prepareManifest(names);
Log.tracev("Committing: {0} writes, {1} deletes", names.written().size(), names.deleted().size());
delegate.commitTx(serialized, txId, (commit) -> {
_lock.writeLock().lock();
try {
// Make the changes visible atomically both in cache and in the underlying store
for (var write : names.written()) {
put(write.getLeft(), Optional.of(write.getRight()));
}
for (var del : names.deleted()) {
put(del, Optional.empty());
}
++_cacheVersion;
commit.run();
} finally {
_lock.writeLock().unlock();
}
});
Log.tracev("Committed: {0} writes, {1} deletes", names.written().size(), names.deleted().size());
}
private class CachingKvIterator implements CloseableKvIterator<JObjectKey, JDataVersionedWrapper> {
private final CloseableKvIterator<JObjectKey, JDataVersionedWrapper> _delegate;
// This should be created under lock
private final long _curCacheVersion = _cacheVersion;
private CachingKvIterator(CloseableKvIterator<JObjectKey, JDataVersionedWrapper> delegate) {
_delegate = delegate;
}
@Override
public JObjectKey peekNextKey() {
return _delegate.peekNextKey();
}
@Override
public Class<?> peekNextType() {
return _delegate.peekNextType();
}
@Override
public void skip() {
_delegate.skip();
}
@Override
public void close() {
_delegate.close();
}
@Override
public boolean hasNext() {
return _delegate.hasNext();
}
@Override
public JObjectKey peekPrevKey() {
return _delegate.peekPrevKey();
}
@Override
public Class<?> peekPrevType() {
return _delegate.peekPrevType();
}
private void maybeCache(Pair<JObjectKey, JDataVersionedWrapper> prev) {
_lock.writeLock().lock();
try {
if (_cacheVersion != _curCacheVersion) {
Log.tracev("Not caching: {0}", prev);
} else {
Log.tracev("Caching: {0}", prev);
put(prev.getKey(), Optional.of(prev.getValue()));
}
} finally {
_lock.writeLock().unlock();
}
}
@Override
public Pair<JObjectKey, JDataVersionedWrapper> prev() {
var prev = _delegate.prev();
maybeCache(prev);
return prev;
}
@Override
public boolean hasPrev() {
return _delegate.hasPrev();
}
@Override
public void skipPrev() {
_delegate.skipPrev();
}
@Override
public Pair<JObjectKey, JDataVersionedWrapper> next() {
var next = _delegate.next();
maybeCache(next);
return next;
}
}
// Returns an iterator with a view of all committed objects
// Does not have to guarantee a consistent view; snapshots are handled by upper layers
// Warning: it has the nasty side effect of global caching, so if some objects are
// still in writeback, don't even call next on it
public CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> getIterator(IteratorStart start, JObjectKey key) {
_lock.readLock().lock();
try {
Log.tracev("Getting cache iterator: {0}, {1}", start, key);
var curSortedCache = _sortedCache;
return new MergingKvIterator<>("cache", start, key,
(mS, mK)
-> new MappingKvIterator<>(
new NavigableMapKvIterator<>(curSortedCache, mS, mK),
e -> switch (e) {
case CacheEntryYes pw -> new Data<>(pw.object());
case CacheEntryDeleted d -> new Tombstone<>();
default -> throw new IllegalStateException("Unexpected value: " + e);
},
e -> {
if (CacheEntryYes.class.isAssignableFrom(e)) {
return Data.class;
} else if (CacheEntryDeleted.class.isAssignableFrom(e)) {
return Tombstone.class;
} else {
throw new IllegalStateException("Unexpected type: " + e);
}
}),
(mS, mK)
-> new MappingKvIterator<>(new CachingKvIterator(delegate.getIterator(mS, mK)), Data::new, (d) -> Data.class));
} finally {
_lock.readLock().unlock();
}
}
private interface CacheEntry {
long size();
}
private record CacheEntryYes(JDataVersionedWrapper object, long size) implements CacheEntry {
}
private record CacheEntryDeleted() implements CacheEntry {
@Override
public long size() {
return 16;
}
}
public long getLastTxId() {
return delegate.getLastCommitId();
}
}
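
The cache-version check in maybeCache() above follows a common version-guard pattern; here is a minimal standalone sketch of the same idea, with simplified locking and hypothetical names (cacheVersion, loadFromStore, cache, lock):

long versionAtStart;
synchronized (lock) { versionAtStart = cacheVersion; }
var value = loadFromStore(key);            // slow path, outside the lock
synchronized (lock) {
    // only install the result if no commit bumped the version meanwhile,
    // otherwise the loaded value may be stale relative to the cache
    if (cacheVersion == versionAtStart)
        cache.put(key, value);
}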

View File

@@ -1,4 +1,4 @@
package com.usatiuk.objects.iterators;
package com.usatiuk.dhfs.objects.persistence;
public enum IteratorStart {
LT,

View File

@@ -1,10 +1,12 @@
package com.usatiuk.objects.stores;
package com.usatiuk.dhfs.objects.persistence;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyMax;
import com.usatiuk.objects.JObjectKeyMin;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.objects.CloseableKvIterator;
import com.usatiuk.dhfs.objects.JObjectKey;
import com.usatiuk.dhfs.objects.KeyPredicateKvIterator;
import com.usatiuk.dhfs.objects.ReversibleKvIterator;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import com.usatiuk.dhfs.utils.RefcountedCloseable;
import io.quarkus.arc.properties.IfBuildProperty;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
@@ -23,10 +25,10 @@ import java.lang.ref.Cleaner;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.stream.Stream;
import java.util.*;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;
import static org.lmdbjava.DbiFlags.MDB_CREATE;
import static org.lmdbjava.Env.create;
@@ -34,25 +36,18 @@ import static org.lmdbjava.Env.create;
@ApplicationScoped
@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "lmdb")
public class LmdbObjectPersistentStore implements ObjectPersistentStore {
private static final String DB_NAME = "objects";
private static final String DB_VER_OBJ_NAME_STR = "__DB_VER_OBJ";
private static final ByteBuffer DB_VER_OBJ_NAME;
@ConfigProperty(name = "dhfs.objects.persistence.lmdb.size", defaultValue = "1000000000000")
long lmdbSize;
static {
byte[] tmp = DB_VER_OBJ_NAME_STR.getBytes(StandardCharsets.ISO_8859_1);
var bb = ByteBuffer.allocateDirect(tmp.length);
bb.put(tmp);
bb.flip();
DB_VER_OBJ_NAME = bb.asReadOnlyBuffer();
}
private final Path _root;
private Env<ByteBuffer> _env;
private Dbi<ByteBuffer> _db;
private boolean _ready = false;
private final AtomicReference<RefcountedCloseable<Txn<ByteBuffer>>> _curReadTxn = new AtomicReference<>();
private long _lastTxId = 0;
private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();
private static final String DB_NAME = "objects";
private static final byte[] DB_VER_OBJ_NAME = "__DB_VER_OBJ".getBytes(StandardCharsets.UTF_8);
public LmdbObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) {
_root = Path.of(root).resolve("objects");
@@ -64,32 +59,27 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
_root.toFile().mkdirs();
}
_env = create()
.setMapSize(lmdbSize)
.setMapSize(1_000_000_000_000L)
.setMaxDbs(1)
.open(_root.toFile(), EnvFlags.MDB_NOTLS);
_db = _env.openDbi(DB_NAME, MDB_CREATE);
try (Txn<ByteBuffer> txn = _env.txnWrite()) {
var read = readTxId(txn);
if (read.isPresent()) {
Log.infov("Read tx id {0}", read.get());
} else {
var bbData = ByteBuffer.allocateDirect(8);
bbData.putLong(0);
bbData.flip();
_db.put(txn, DB_VER_OBJ_NAME.asReadOnlyBuffer(), bbData);
txn.commit();
var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
bb.put(DB_VER_OBJ_NAME);
bb.flip();
try (Txn<ByteBuffer> txn = _env.txnRead()) {
var value = _db.get(txn, bb);
if (value != null) {
var ver = value.getLong();
Log.infov("Read version: {0}", ver);
_lastTxId = ver;
}
}
_ready = true;
}
private Optional<Long> readTxId(Txn<ByteBuffer> txn) {
var value = _db.get(txn, DB_VER_OBJ_NAME.asReadOnlyBuffer());
return Optional.ofNullable(value).map(ByteBuffer::getLong);
}
void shutdown(@Observes @Priority(900) ShutdownEvent event) throws IOException {
_ready = false;
_db.close();
@@ -100,140 +90,72 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
if (!_ready) throw new IllegalStateException("Wrong service order!");
}
@Nonnull
@Override
public Snapshot<JObjectKey, ByteBuffer> getSnapshot() {
var txn = _env.txnRead();
try {
long commitId = readTxId(txn).orElseThrow();
return new Snapshot<JObjectKey, ByteBuffer>() {
private final Txn<ByteBuffer> _txn = txn;
private final long _id = commitId;
private boolean _closed = false;
public Collection<JObjectKey> findAllObjects() {
// try (Txn<ByteBuffer> txn = env.txnRead()) {
// try (var cursor = db.openCursor(txn)) {
// var keys = List.of();
// while (cursor.next()) {
// keys.add(JObjectKey.fromBytes(cursor.key()));
// }
// return keys;
// }
// }
return List.of();
}
@Override
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>>> getIterator(IteratorStart start, JObjectKey key) {
assert !_closed;
return List.of(new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn, start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR)));
}
@Nonnull
@Override
public Optional<ByteBuffer> readObject(JObjectKey name) {
assert !_closed;
var got = _db.get(_txn, name.toByteBuffer());
var ret = Optional.ofNullable(got).map(ByteBuffer::asReadOnlyBuffer);
return ret;
}
@Override
public long id() {
assert !_closed;
return _id;
}
@Override
public void close() {
assert !_closed;
_closed = true;
_txn.close();
}
};
} catch (Exception e) {
txn.close();
throw e;
@Nonnull
@Override
public Optional<ByteString> readObject(JObjectKey name) {
verifyReady();
try (Txn<ByteBuffer> txn = _env.txnRead()) {
var value = _db.get(txn, name.toByteBuffer());
return Optional.ofNullable(value).map(ByteString::copyFrom);
}
}
@Override
public Runnable prepareTx(TxManifestRaw names, long txId) {
verifyReady();
var txn = _env.txnWrite();
try {
for (var written : names.written()) {
var putBb = _db.reserve(txn, written.getKey().toByteBuffer(), written.getValue().size());
written.getValue().copyTo(putBb);
}
for (JObjectKey key : names.deleted()) {
_db.delete(txn, key.toByteBuffer());
}
assert txId > readTxId(txn).orElseThrow();
var bbData = ByteBuffer.allocateDirect(8);
bbData.putLong(txId);
bbData.flip();
_db.put(txn, DB_VER_OBJ_NAME.asReadOnlyBuffer(), bbData);
} catch (Throwable t) {
txn.close();
throw t;
}
return () -> {
try {
txn.commit();
} finally {
txn.close();
}
};
}
@Override
public long getTotalSpace() {
verifyReady();
return _root.toFile().getTotalSpace();
}
@Override
public long getFreeSpace() {
verifyReady();
return _root.toFile().getFreeSpace();
}
@Override
public long getUsableSpace() {
verifyReady();
return _root.toFile().getUsableSpace();
}
private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>> {
private static final Cleaner CLEANER = Cleaner.create();
private final Txn<ByteBuffer> _txn; // Managed by the snapshot
private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, ByteString> {
private final RefcountedCloseable<Txn<ByteBuffer>> _txn;
private final Cursor<ByteBuffer> _cursor;
private boolean _hasNext = false;
private static final Cleaner CLEANER = Cleaner.create();
private final MutableObject<Boolean> _closed = new MutableObject<>(false);
// private final Exception _allocationStacktrace = new Exception();
// private final Exception _allocationStacktrace = null;
private boolean _hasNext = false;
private JObjectKey _peekedNextKey = null;
private final Exception _allocationStacktrace = null;
LmdbKvIterator(Txn<ByteBuffer> txn, IteratorStart start, JObjectKey key) {
_txn = txn;
LmdbKvIterator(IteratorStart start, JObjectKey key) {
_goingForward = true;
_cursor = _db.openCursor(_txn);
_lock.readLock().lock();
try {
var got = _curReadTxn.get();
var refInc = Optional.ofNullable(got).map(RefcountedCloseable::ref).orElse(null);
if (refInc != null) {
_txn = got;
} else {
var newTxn = new RefcountedCloseable<>(_env.txnRead());
_curReadTxn.compareAndSet(got, newTxn);
_txn = newTxn;
}
} finally {
_lock.readLock().unlock();
}
_cursor = _db.openCursor(_txn.get());
var closedRef = _closed;
// var bt = _allocationStacktrace;
// CLEANER.register(this, () -> {
// if (!closedRef.getValue()) {
// Log.error("Iterator was not closed before GC, allocated at: {0}", bt);
// System.exit(-1);
// }
// });
var bt = _allocationStacktrace;
CLEANER.register(this, () -> {
if (!closedRef.getValue()) {
Log.error("Iterator was not closed before GC, allocated at: {0}", bt);
System.exit(-1);
}
});
verifyReady();
if (key instanceof JObjectKeyMin) {
_hasNext = _cursor.first();
return;
} else if (key instanceof JObjectKeyMax) {
_hasNext = _cursor.last();
return;
}
if (key.toByteBuffer().remaining() == 0) {
if (!_cursor.first())
return;
} else if (!_cursor.get(key.toByteBuffer(), GetOp.MDB_SET_RANGE)) {
if (!_cursor.get(key.toByteBuffer(), GetOp.MDB_SET_RANGE)) {
return;
}
@@ -272,24 +194,24 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
}
// var realGot = JObjectKey.fromByteBuffer(_cursor.key());
// _cursor.key().flip();
//
// switch (start) {
// case LT -> {
//// assert !_hasNext || realGot.compareTo(key) < 0;
// }
// case LE -> {
//// assert !_hasNext || realGot.compareTo(key) <= 0;
// }
// case GT -> {
// assert !_hasNext || realGot.compareTo(key) > 0;
// }
// case GE -> {
// assert !_hasNext || realGot.compareTo(key) >= 0;
// }
// }
// Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
var realGot = JObjectKey.fromByteBuffer(_cursor.key());
_cursor.key().flip();
switch (start) {
case LT -> {
// assert !_hasNext || realGot.compareTo(key) < 0;
}
case LE -> {
// assert !_hasNext || realGot.compareTo(key) <= 0;
}
case GT -> {
assert !_hasNext || realGot.compareTo(key) > 0;
}
case GE -> {
assert !_hasNext || realGot.compareTo(key) >= 0;
}
}
Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
}
@Override
@@ -299,6 +221,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
_closed.setValue(true);
_cursor.close();
_txn.unref();
}
@Override
@@ -317,7 +240,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
}
_goingForward = !_goingForward;
_peekedNextKey = null;
}
@Override
@@ -325,12 +247,8 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
if (_peekedNextKey != null) {
return _peekedNextKey;
}
var ret = JObjectKey.fromByteBuffer(_cursor.key());
_cursor.key().flip();
_peekedNextKey = ret;
return ret;
}
@@ -340,7 +258,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
_hasNext = _cursor.next();
else
_hasNext = _cursor.prev();
_peekedNextKey = null;
}
@Override
@@ -349,21 +266,110 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
}
@Override
protected Pair<JObjectKey, MaybeTombstone<ByteBuffer>> nextImpl() {
protected Pair<JObjectKey, ByteString> nextImpl() {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
// TODO: Right now with java serialization it doesn't matter, it's all copied to arrays anyway
var val = _cursor.val();
Pair<JObjectKey, MaybeTombstone<ByteBuffer>> ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), new DataWrapper<>(val.asReadOnlyBuffer()));
// var val = _cursor.val();
// var bbDirect = UninitializedByteBuffer.allocateUninitialized(val.remaining());
// bbDirect.put(val);
// bbDirect.flip();
// var bs = UnsafeByteOperations.unsafeWrap(bbDirect);
// var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), bs);
var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), ByteString.copyFrom(_cursor.val()));
if (_goingForward)
_hasNext = _cursor.next();
else
_hasNext = _cursor.prev();
// Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
_peekedNextKey = null;
Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
return ret;
}
@Override
protected Class<? extends ByteString> peekTypeImpl() {
if (!_hasNext)
throw new NoSuchElementException();
return ByteString.class;
}
}
@Override
public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
return new KeyPredicateKvIterator<>(new LmdbKvIterator(start, key), start, key, (k) -> !Arrays.equals(k.name().getBytes(StandardCharsets.UTF_8), DB_VER_OBJ_NAME));
}
@Override
public void commitTx(TxManifestRaw names, long txId, Consumer<Runnable> commitLocked) {
verifyReady();
try (Txn<ByteBuffer> txn = _env.txnWrite()) {
for (var written : names.written()) {
// TODO:
var bb = UninitializedByteBuffer.allocateUninitialized(written.getValue().size());
bb.put(written.getValue().asReadOnlyByteBuffer());
bb.flip();
_db.put(txn, written.getKey().toByteBuffer(), bb);
}
for (JObjectKey key : names.deleted()) {
_db.delete(txn, key.toByteBuffer());
}
var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
bb.put(DB_VER_OBJ_NAME);
bb.flip();
var bbData = ByteBuffer.allocateDirect(8);
commitLocked.accept(() -> {
_lock.writeLock().lock();
try {
var realTxId = txId;
if (realTxId == -1)
realTxId = _lastTxId + 1;
assert realTxId > _lastTxId;
_lastTxId = realTxId;
bbData.putLong(realTxId);
bbData.flip();
_db.put(txn, bb, bbData);
_curReadTxn.set(null);
txn.commit();
} finally {
_lock.writeLock().unlock();
}
});
}
}
@Override
public long getTotalSpace() {
verifyReady();
return _root.toFile().getTotalSpace();
}
@Override
public long getFreeSpace() {
verifyReady();
return _root.toFile().getFreeSpace();
}
@Override
public long getUsableSpace() {
verifyReady();
return _root.toFile().getUsableSpace();
}
@Override
public long getLastCommitId() {
_lock.readLock().lock();
try {
return _lastTxId;
} finally {
_lock.readLock().unlock();
}
}
}
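
A hedged sketch of how the commitTx() variant above is meant to be driven (lmdbStore, manifest and upperLayerLock are hypothetical names): the caller receives a runnable that writes the version marker and commits the LMDB transaction, and decides under its own lock when that becomes visible.

lmdbStore.commitTx(manifest, txId, publish -> {
    upperLayerLock.lock();
    try {
        // publish.run() writes __DB_VER_OBJ and commits the LMDB txn,
        // making the batch durable and visible to new read txns
        publish.run();
    } finally {
        upperLayerLock.unlock();
    }
});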

Some files were not shown because too many files have changed in this diff