Mirror of https://github.com/usatiuk/dhfs.git (synced 2025-10-29 04:57:48 +01:00)

Compare commits: 130 commits on `type-itera...` (head `59e8f6a6b4`)
| SHA1 |
|---|
| 59e8f6a6b4 |
| 0292df7f0e |
| a6a4101bb0 |
| 59fa5dcf28 |
| 0f5fb8b8b6 |
| c087dd8971 |
| 14ddddd0ff |
| 9859378627 |
| e167c21d40 |
| 7dc8f486ea |
| da1a996e6f |
| bb52a3af0e |
| de0b868349 |
| d4d4e150c1 |
| c9b0400d50 |
| 94218330b1 |
| dbe2a72f7c |
| 643c53c894 |
| 29fdd3eb08 |
| e6ead10e7f |
| 04c5685fd5 |
| 7061117f56 |
| 67852fb37e |
| d48cc18e85 |
| 77177414eb |
| 83e0f6eb0a |
| a5727c01b1 |
| 711c4f5e28 |
| 45556f2b74 |
| 146870c281 |
| 9178e7ee2d |
| 7c605135c5 |
| 491afd454b |
| bb65aab166 |
| a4810c7ee4 |
| e42e076b77 |
| 513cbd717d |
| 075867daaa |
| 8e4ea67e53 |
| fb128882cb |
| cb8c50000a |
| 4c5cbfb5bf |
| 6bcec4a260 |
| df00584367 |
| ea4f041d6e |
| 3c37638db2 |
| 0e12a59f23 |
| 735dd605d7 |
| 194166109e |
| 68111a0c4f |
| b872c32a05 |
| 0e14b1cd39 |
| 17843952f2 |
| ffef8959df |
| cb909478dc |
| 06335b4b99 |
| 8351bec59a |
| 29663f575d |
| 0f8002dc2c |
| 5c50d572d0 |
| edebb6d8f0 |
| 5d620c64c5 |
| b998871e7f |
| 69eb96b10c |
| 8b3c0a6f2c |
| 4a19f69c38 |
| 8d40019f2c |
| 312cf18b27 |
| 6a8394852e |
| 3e69e5dfb9 |
| 8fdbaf5aa7 |
| 4d44e3541b |
| 18d5a7f90e |
| adcc5f464f |
| d9ded36891 |
| 038b873364 |
| 8f7869d87a |
| e0b4f97349 |
| 035f64df5a |
| 4c5fd91050 |
| 8559c9b984 |
| e80e33568b |
| 03850d3522 |
| 527395447c |
| 9108b27dd3 |
| 3bd0c4e2bb |
| c977b5f6c9 |
| c5a875c27f |
| ba6bb756bb |
| a63e7e59b3 |
| 9a02a554a1 |
| 892e5ca9b7 |
| c12bff3ee7 |
| 59a0b9a856 |
| 817d12a161 |
| 258c257778 |
| b0bb9121e7 |
| a224c6bd51 |
| 13ecdd3106 |
| 8a07f37566 |
| dc0e73b1aa |
| 16eb1d28d9 |
| 4f397cd2d4 |
| 6a20550353 |
| 92bca1e4e1 |
| 4bfa93fca4 |
| 7d762c70fa |
| 20daa857e6 |
| 97c0f002fb |
| f260bb0491 |
| a2e75dbdc7 |
| fa64dac9aa |
| 8fbdf50732 |
| be1f5d12c9 |
| 1fd3b9e5e0 |
| 8499e20823 |
| 842bd49246 |
| 1b0af6e883 |
| 667f8b3b42 |
| 0aca2c5dbb |
| 223ba20418 |
| ae17ab6ce9 |
| 6e37320e7c |
| d37dc944d0 |
| d483eba20d |
| 4cbb4ce2be |
| 5f85e944e3 |
| 4c90a74fea |
| 38ab6de85b |
| 29fd2826a3 |
`.github/workflows/server.yml` (vendored): 4 changed lines

```diff
@@ -49,7 +49,7 @@ jobs:
           cache: maven

       - name: Test with Maven
-        run: cd dhfs-parent && mvn --batch-mode --update-snapshots package verify
+        run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify

 #      - name: Build with Maven
 #        run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG
@@ -57,7 +57,7 @@ jobs:
       - uses: actions/upload-artifact@v4
         with:
           name: DHFS Server Package
-          path: dhfs-parent/server/target/quarkus-app
+          path: dhfs-parent/dhfs-app/target/quarkus-app

       - uses: actions/upload-artifact@v4
         if: ${{ always() }}
```
```diff
@@ -1,8 +1,8 @@
 <component name="ProjectRunConfigurationManager">
   <configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
-    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main" />
-    <module name="server" />
-    <option name="VM_PARAMETERS" value="--add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
+    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.app.Main" />
+    <module name="dhfs-app" />
+    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
     <extension name="coverage">
       <pattern>
         <option name="PATTERN" value="com.usatiuk.dhfs.*" />
```
```diff
@@ -1,8 +1,8 @@
 <component name="ProjectRunConfigurationManager">
   <configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
-    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main" />
-    <module name="server" />
-    <option name="VM_PARAMETERS" value="--add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021" />
+    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.app.Main" />
+    <module name="dhfs-app" />
+    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=8080 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
     <extension name="coverage">
       <pattern>
         <option name="PATTERN" value="com.usatiuk.dhfs.*" />
```
@@ -1,60 +0,0 @@ (deleted file)

```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk</groupId>
        <artifactId>autoprotomap-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <artifactId>autoprotomap-deployment</artifactId>
    <name>Autoprotomap - Deployment</name>

    <dependencies>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc-deployment</artifactId>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5-internal</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc-deployment</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <executions>
                    <execution>
                        <id>default-compile</id>
                        <configuration>
                            <annotationProcessorPaths>
                                <path>
                                    <groupId>io.quarkus</groupId>
                                    <artifactId>quarkus-extension-processor</artifactId>
                                    <version>${quarkus.platform.version}</version>
                                </path>
                            </annotationProcessorPaths>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
```
@@ -1,78 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.deployment;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.arc.deployment.GeneratedBeanBuildItem;
import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.ApplicationIndexBuildItem;
import io.quarkus.gizmo.ClassCreator;
import io.quarkus.gizmo.SignatureBuilder;
import jakarta.inject.Singleton;
import org.jboss.jandex.ClassType;
import org.jboss.jandex.Type;

class AutoprotomapProcessor {
    @BuildStep
    ProtoIndexBuildItem index(ApplicationIndexBuildItem jandex) {
        var ret = new ProtoIndexBuildItem();
        var annot = jandex.getIndex().getAnnotations(ProtoMirror.class);
        for (var a : annot) {
            var protoTarget = jandex.getIndex().getClassByName(((ClassType) a.value().value()).name());
//            if (!messageImplementors.contains(protoTarget))
//                throw new IllegalArgumentException("Expected " + protoTarget + " to be a proto message");
            System.out.println("Found: " + a.name().toString() + " at " + protoTarget.name().toString() + " of " + a.target().asClass().name().toString());
            ret.protoMsgToObj.put(protoTarget, a.target().asClass());
        }
        return ret;
    }

    @BuildStep
    void generateProtoSerializer(ApplicationIndexBuildItem jandex,
                                 ProtoIndexBuildItem protoIndex,
                                 BuildProducer<GeneratedBeanBuildItem> generatedClasses) {
        try {
            for (var o : protoIndex.protoMsgToObj.entrySet()) {
                System.out.println("Generating " + o.getKey().toString() + " -> " + o.getValue().toString());
                var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedClasses);

                var msgType = io.quarkus.gizmo.Type.classType(o.getKey().name());
                var objType = io.quarkus.gizmo.Type.classType(o.getValue().name());

                var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
                        io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
                        msgType, objType);

                var msgJType = Type.create(o.getKey().name(), Type.Kind.CLASS);
                var objJType = Type.create(o.getValue().name(), Type.Kind.CLASS);

                try (ClassCreator classCreator = ClassCreator.builder()
                        .className("com.usatiuk.autoprotomap.generated.for" + o.getKey().simpleName())
                        .signature(SignatureBuilder.forClass().addInterface(type))
                        .classOutput(gizmoAdapter)
                        .setFinal(true)
                        .build()) {
                    classCreator.addAnnotation(Singleton.class);

                    var generator = new ProtoSerializerGenerator(
                            jandex.getIndex(),
                            protoIndex,
                            classCreator,
                            msgJType,
                            objJType
                    );

                    generator.generate();
                }
            }
        } catch (Throwable e) {
            StringBuilder sb = new StringBuilder();
            sb.append(e + "\n");
            for (var el : e.getStackTrace()) {
                sb.append(el.toString() + "\n");
            }
            System.out.println(sb);
        }
    }
}
```
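Usage-wise, the `@Singleton` bean this processor generates is picked up by CDI through its parameterized interface type; the integration test further down in this diff injects it exactly that way. A minimal sketch (the class and method names here are illustrative, only the injection pattern comes from the source):

```java
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Inject;

// Hypothetical consumer class, mirroring the @Inject usage in
// AutoprotomapResourceTest below.
public class SerializerClient {
    @Inject
    ProtoSerializer<SimpleObjectProto, SimpleObject> simpleProtoSerializer;

    // Round-trip an object through its generated proto message.
    byte[] toBytes(SimpleObject o) {
        return simpleProtoSerializer.serialize(o).toByteArray();
    }
}
```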
@@ -1,18 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.deployment;

public class Constants {
    public static final String FIELD_PREFIX = "_";

    public static String capitalize(String str) {
        return str.substring(0, 1).toUpperCase() + str.substring(1);
    }

    public static String stripPrefix(String str, String prefix) {
        if (str.startsWith(prefix)) {
            return str.substring(prefix.length());
        }
        return str;
    }
}
```
@@ -1,6 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.deployment;

@FunctionalInterface
public interface Effect {
    void apply();
}
```
@@ -1,10 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.deployment;

import io.quarkus.builder.item.SimpleBuildItem;
import org.apache.commons.collections4.BidiMap;
import org.apache.commons.collections4.bidimap.DualHashBidiMap;
import org.jboss.jandex.ClassInfo;

public final class ProtoIndexBuildItem extends SimpleBuildItem {
    BidiMap<ClassInfo, ClassInfo> protoMsgToObj = new DualHashBidiMap<>();
}
```
@@ -1,342 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.deployment;

import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.gizmo.*;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.jboss.jandex.Type;
import org.jboss.jandex.*;
import org.objectweb.asm.Opcodes;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static com.usatiuk.autoprotomap.deployment.Constants.*;

public class ProtoSerializerGenerator {
    private final Index index;
    private final ProtoIndexBuildItem protoIndex;
    private final ClassCreator classCreator;
    private final HashSet<Pair<ClassInfo, ClassInfo>> externalSerializers = new HashSet<>();
    private final Type topMessageType;
    private final Type topObjectType;

    public ProtoSerializerGenerator(Index index, ProtoIndexBuildItem protoIndex, ClassCreator classCreator, Type topMessageType, Type topObjectType) {
        this.index = index;
        this.protoIndex = protoIndex;
        this.classCreator = classCreator;
        this.topMessageType = topMessageType;
        this.topObjectType = topObjectType;
    }

    private FieldDescriptor getOutsideSerializer(ClassInfo messageClass, ClassInfo objectClass) {
        var name = messageClass.name().withoutPackagePrefix() + objectClass.name().withoutPackagePrefix() + "serializer";
        var msgType = io.quarkus.gizmo.Type.classType(messageClass.name());
        var objType = io.quarkus.gizmo.Type.classType(objectClass.name());
        var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
                io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
                msgType, objType);
        var sig = SignatureBuilder.forField().setType(type).build();
        var fd = FieldDescriptor.of(classCreator.getClassName(), name, ProtoSerializer.class);
        if (externalSerializers.add(Pair.of(messageClass, objectClass))) {
            var fc = classCreator.getFieldCreator(fd);
            fc.addAnnotation(Inject.class);
            fc.setSignature(sig);
            fc.setModifiers(Opcodes.ACC_PUBLIC);
        }
        return fd;
    }

    private void traverseHierarchy(Index index, ClassInfo klass, Consumer<ClassInfo> visitor) {
        var cur = klass;
        while (true) {
            visitor.accept(cur);

            var next = cur.superClassType().name();
            if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break;
            cur = index.getClassByName(next);
        }
    }

    private ArrayList<FieldInfo> findAllFields(Index index, ClassInfo klass) {
        ArrayList<FieldInfo> ret = new ArrayList<>();
        traverseHierarchy(index, klass, cur -> {
            ret.addAll(cur.fields());
        });
        return ret;
    }

    private void generateBuilderUse(BytecodeCreator bytecodeCreator,
                                    ResultHandle builder,
                                    Type messageType, Type objectType,
                                    ResultHandle object) {
        var builderType = Type.create(DotName.createComponentized(messageType.name(), "Builder", true), Type.Kind.CLASS);

        var objectClass = index.getClassByName(objectType.name().toString());

        Function<String, String> getterGetter = objectClass.isRecord()
                ? Function.identity()
                : s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX));

        for (var f : findAllFields(index, objectClass)) {
            var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX);

            Supplier<ResultHandle> get = () -> {
                if ((f.flags() & Opcodes.ACC_PUBLIC) != 0)
                    return bytecodeCreator.readInstanceField(f, object);
                else {
                    var fieldGetter = getterGetter.apply(f.name());
                    return bytecodeCreator.invokeVirtualMethod(
                            MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object);
                }
            };

            Effect doSimpleCopy = () -> {
                var setter = MethodDescriptor.ofMethod(builderType.name().toString(), "set" + capitalize(consideredFieldName),
                        builderType.name().toString(), f.type().toString());

                var val = get.get();
                bytecodeCreator.invokeVirtualMethod(setter, builder, val);
            };

            switch (f.type().kind()) {
                case CLASS -> {
                    if (f.type().equals(Type.create(String.class)) || f.type().equals(Type.create(ByteString.class))) {
                        doSimpleCopy.apply();
                    } else {
                        var builderGetter = "get" + capitalize(f.name()) + "Builder";
                        var protoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(f.type().name()));
                        var nestedBuilderType = Type.create(DotName.createComponentized(protoType.name(), "Builder", true), Type.Kind.CLASS);
                        var nestedBuilder = bytecodeCreator.invokeVirtualMethod(
                                MethodDescriptor.ofMethod(builderType.toString(), builderGetter, nestedBuilderType.name().toString()), builder);

                        var val = get.get();

                        generateBuilderUse(bytecodeCreator, nestedBuilder, Type.create(protoType.name(), Type.Kind.CLASS), f.type(), val);
                    }
                }
                case PRIMITIVE -> {
                    doSimpleCopy.apply();
                }
                case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
                case PARAMETERIZED_TYPE ->
                        throw new UnsupportedOperationException("Parametrized types not supported yet");
                case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
                default -> throw new IllegalStateException("Unexpected type: " + f.type());
            }
        }
    }

    private ResultHandle generateConstructorUse(
            BytecodeCreator bytecodeCreator,
            ClassCreator classCreator,
            Type messageType, Type objectType,
            ResultHandle message
    ) {
        var constructor = findAllArgsConstructor(index, index.getClassByName(objectType.name()));
        if (constructor == null) {
            throw new IllegalStateException("No constructor found for type: " + objectType.name());
        }
        var argMap = new ResultHandle[constructor.parametersCount()];

        for (int i = 0; i < argMap.length; i++) {
            var type = constructor.parameterType(i);
            var strippedName = stripPrefix(constructor.parameterName(i), FIELD_PREFIX);

            IntConsumer doSimpleCopy = (arg) -> {
                var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
                        type.name().toString());
                argMap[arg] = bytecodeCreator.invokeVirtualMethod(call, message);
            };

            switch (type.kind()) {
                case CLASS -> {
                    if (type.equals(Type.create(String.class)) || type.equals(Type.create(ByteString.class))) {
                        doSimpleCopy.accept(i);
                    } else {
                        var nestedProtoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(type.name()));
                        var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
                                nestedProtoType.name().toString());
                        var nested = bytecodeCreator.invokeVirtualMethod(call, message);
                        argMap[i] = generateConstructorUse(bytecodeCreator, classCreator, Type.create(nestedProtoType.name(), Type.Kind.CLASS), type, nested);
                    }
                }
                case PRIMITIVE -> {
                    doSimpleCopy.accept(i);
                }
                case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
                case PARAMETERIZED_TYPE ->
                        throw new UnsupportedOperationException("Parametrized types not supported yet");
                case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
                default -> throw new IllegalStateException("Unexpected type: " + type);
            }
        }

        return bytecodeCreator.newInstance(constructor, argMap);
    }

    private MethodInfo findAllArgsConstructor(Index index, ClassInfo klass) {
        ArrayList<FieldInfo> fields = findAllFields(index, klass);

        var fieldCount = fields.size();
        var fieldNames = fields.stream().map(f -> stripPrefix(f.name(), FIELD_PREFIX)).sorted().toList();
        var fieldNameToType = fields.stream().collect(Collectors.toMap(f -> stripPrefix(f.name(), FIELD_PREFIX), FieldInfo::type));

        for (var m : klass.constructors()) {
            if (m.parametersCount() != fieldCount) continue;
            var parameterNames = m.parameters().stream().map(n -> stripPrefix(n.name(), FIELD_PREFIX)).sorted().toList();
            if (!Objects.equals(fieldNames, parameterNames)) continue;

            for (var p : m.parameters()) {
                if (!Objects.equals(fieldNameToType.get(stripPrefix(p.name(), FIELD_PREFIX)), p.type())) continue;
            }

            return m;
        }

        return null;
    }

    public void generateAbstract() {
        var kids = Stream.concat(index.getAllKnownSubclasses(topObjectType.name()).stream(),
                        index.getAllKnownImplementors(topObjectType.name()).stream())
                .filter(k -> !k.isAbstract() && !k.isInterface()).toList();

        try (MethodCreator method = classCreator.getMethodCreator("serialize",
                Message.class, Object.class)) {

            method.setModifiers(Opcodes.ACC_PUBLIC);

            var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);

            var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));

            var arg = method.getMethodParam(0);

            for (var nestedObjClass : kids) {
                System.out.println("Generating " + nestedObjClass.name() + " serializer for " + topObjectType.name());
                var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
                var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
                boolean doExternalCall = false;
                if (nestedMessageClass == null) {
                    var msgInfo = index.getClassByName(topMessageType.name());
                    nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
                    doExternalCall = true;
                }
                var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);

                var statement = method.ifTrue(method.instanceOf(arg, nestedObjClass.name().toString()));

                try (var branch = statement.trueBranch()) {
                    if (doExternalCall) {
                        var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
                        var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
                        var serialized = branch.invokeInterfaceMethod(
                                MethodDescriptor.ofMethod(ProtoSerializer.class,
                                        "serialize", Message.class, Object.class),
                                serializerLoaded, arg);
                        branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
                                "set" + capitalize(nestedObjType.name().withoutPackagePrefix()),
                                builderType.name().toString(), nestedMessageType.name().toString()), builder, serialized);
                    } else {
                        var nestedBuilderType = Type.create(DotName.createComponentized(nestedMessageType.name(), "Builder", true), Type.Kind.CLASS);
                        var nestedBuilder = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
                                "get" + capitalize(nestedObjType.name().withoutPackagePrefix()) + "Builder",
                                nestedBuilderType.name().toString()), builder);
                        generateBuilderUse(branch, nestedBuilder, nestedMessageType, nestedObjType, arg);
                    }
                    var result = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
                    branch.returnValue(result);
                }
            }
            method.throwException(IllegalArgumentException.class, "Unknown object type");
        }

        try (MethodCreator method = classCreator.getMethodCreator("deserialize",
                Object.class, Message.class)) {
            method.setModifiers(Opcodes.ACC_PUBLIC);
            var arg = method.getMethodParam(0);

            for (var nestedObjClass : kids) {
                System.out.println("Generating " + nestedObjClass.name() + " deserializer for " + topObjectType.name());
                var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);

                var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
                boolean doExternalCall = false;
                if (nestedMessageClass == null) {
                    var msgInfo = index.getClassByName(topMessageType.name());
                    nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
                    doExternalCall = true;
                }

                var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);

                var typeCheck = method.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
                        "has" + capitalize(nestedObjType.name().withoutPackagePrefix()), boolean.class), arg);

                var statement = method.ifTrue(typeCheck);

                try (var branch = statement.trueBranch()) {
                    var nestedMessage = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
                            "get" + capitalize(nestedObjType.name().withoutPackagePrefix()), nestedMessageType.name().toString()), arg);
                    if (doExternalCall) {
                        var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
                        var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
                        branch.returnValue(branch.invokeInterfaceMethod(
                                MethodDescriptor.ofMethod(ProtoSerializer.class,
                                        "deserialize", Object.class, Message.class),
                                serializerLoaded, nestedMessage));
                    } else {
                        branch.returnValue(generateConstructorUse(branch, classCreator, nestedMessageType, nestedObjType, nestedMessage));
                    }
                }
            }
            method.throwException(IllegalArgumentException.class, "Unknown object type");
        }
    }

    public void generate() {
        var objInfo = index.getClassByName(topObjectType.name());
        if (objInfo.isAbstract() || objInfo.isInterface()) {
            generateAbstract();
            return;
        }

        try (MethodCreator method = classCreator.getMethodCreator("serialize",
                Message.class, Object.class)) {

            method.setModifiers(Opcodes.ACC_PUBLIC);

            var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);

            var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));

            var arg = method.getMethodParam(0);

            generateBuilderUse(method, builder, topMessageType, topObjectType, arg);

            var result = method.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);

            method.returnValue(result);
        }

        try (MethodCreator method = classCreator.getMethodCreator("deserialize",
                Object.class, Message.class)) {
            method.setModifiers(Opcodes.ACC_PUBLIC);

            var arg = method.getMethodParam(0);

            method.returnValue(generateConstructorUse(method, classCreator, topMessageType, topObjectType, arg));
        }
    }
}
```
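For orientation, here is a hand-written sketch of what the Gizmo bytecode above conceptually produces for the `SimpleObject`/`SimpleObjectProto` pair from the integration tests. This source form is an assumption for readability (the class name is illustrative; the real bean is emitted directly as bytecode):

```java
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Singleton;

// Illustrative equivalent of the generated bean, not actual generator output.
@Singleton
final class ForSimpleObjectProtoSketch implements ProtoSerializer<SimpleObjectProto, SimpleObject> {
    @Override
    public SimpleObjectProto serialize(SimpleObject object) {
        // generateBuilderUse(): copy each field into the message builder,
        // reading public fields directly and private ones via getters.
        return SimpleObjectProto.newBuilder()
                .setNumfield(object.numfield)
                .setName(object.getName())
                .setSomeBytes(object.someBytes)
                .build();
    }

    @Override
    public SimpleObject deserialize(SimpleObjectProto message) {
        // generateConstructorUse(): call the all-args constructor with the
        // corresponding message getters.
        return new SimpleObject(message.getNumfield(), message.getName(), message.getSomeBytes());
    }
}
```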
@@ -1,22 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.test;

import io.quarkus.test.QuarkusDevModeTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

public class AutoprotomapDevModeTest {

    // Start hot reload (DevMode) test with your extension loaded
    @RegisterExtension
    static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest()
            .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));

    @Test
    public void writeYourOwnDevModeTest() {
        // Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information
        Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName());
    }
}
```
@@ -1,22 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.test;

import io.quarkus.test.QuarkusUnitTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

public class AutoprotomapTest {

    // Start unit test with your extension loaded
    @RegisterExtension
    static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
            .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));

    @Test
    public void writeYourOwnUnitTest() {
        // Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information
        Assertions.assertTrue(true, "Add some assertions to " + getClass().getName());
    }
}
```
@@ -1,107 +0,0 @@ (deleted file)

```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk</groupId>
        <artifactId>autoprotomap-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <artifactId>autoprotomap-integration-tests</artifactId>
    <name>Autoprotomap - Integration Tests</name>

    <properties>
        <skipITs>true</skipITs>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap-deployment</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>io.quarkus</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
                <executions>
                    <execution>
                        <goals>
                            <goal>build</goal>
                            <goal>generate-code</goal>
                            <goal>generate-code-tests</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <artifactId>maven-failsafe-plugin</artifactId>
                <executions>
                    <execution>
                        <goals>
                            <goal>integration-test</goal>
                            <goal>verify</goal>
                        </goals>
                    </execution>
                </executions>
                <configuration>
                    <systemPropertyVariables>
                        <native.image.path>${project.build.directory}/${project.build.finalName}-runner
                        </native.image.path>
                        <java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
                        <maven.home>${maven.home}</maven.home>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <profiles>
        <profile>
            <id>native-image</id>
            <activation>
                <property>
                    <name>native</name>
                </property>
            </activation>
            <build>
                <plugins>
                    <plugin>
                        <artifactId>maven-surefire-plugin</artifactId>
                        <configuration>
                            <skipTests>${native.surefire.skip}</skipTests>
                        </configuration>
                    </plugin>
                </plugins>
            </build>
            <properties>
                <skipITs>false</skipITs>
                <quarkus.native.enabled>true</quarkus.native.enabled>
            </properties>
        </profile>
    </profiles>
</project>
```
@@ -1,7 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(AbstractProto.class)
public abstract class AbstractObject {
}
```
@@ -1,10 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import lombok.AllArgsConstructor;
import lombok.Getter;

@AllArgsConstructor
@Getter
public class CustomObject extends AbstractObject {
    public int testNum = 0;
}
```
@@ -1,17 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Singleton;

@Singleton
public class CustomObjectSerializer implements ProtoSerializer<CustomObjectProto, CustomObject> {
    @Override
    public CustomObject deserialize(CustomObjectProto message) {
        return new CustomObject(2);
    }

    @Override
    public CustomObjectProto serialize(CustomObject object) {
        return CustomObjectProto.newBuilder().setTest(1).build();
    }
}
```
@@ -1,8 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(InterfaceObjectProto.class)
public interface InterfaceObject {
    String key();
}
```
@@ -1,15 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;

@ProtoMirror(NestedObjectProto.class)
@AllArgsConstructor
@Getter
public class NestedObject extends AbstractObject {
    public SimpleObject object;
    public String _nestedName;
    public ByteString _nestedSomeBytes;
}
```
@@ -1,7 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(RecordObjectProto.class)
public record RecordObject(String key) implements InterfaceObject {
}
```
@@ -1,7 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

@ProtoMirror(RecordObject2Proto.class)
public record RecordObject2(String key, int value) implements InterfaceObject {
}
```
@@ -1,15 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;

@ProtoMirror(SimpleObjectProto.class)
@AllArgsConstructor
@Getter
public class SimpleObject extends AbstractObject {
    public int numfield = 0;
    private String name;
    public ByteString someBytes;
}
```
@@ -1,47 +0,0 @@ (deleted file)

```protobuf
syntax = "proto3";

option java_multiple_files = true;
option java_package = "com.usatiuk.autoprotomap.it";
option java_outer_classname = "TestProto";

package autoprotomap.test;

message SimpleObjectProto {
  int32 numfield = 1;
  string name = 2;
  bytes someBytes = 3;
}

message NestedObjectProto {
  SimpleObjectProto object = 1;
  string nestedName = 2;
  bytes nestedSomeBytes = 3;
}

message CustomObjectProto {
  int64 test = 1;
}

message AbstractProto {
  oneof obj {
    NestedObjectProto nestedObject = 1;
    SimpleObjectProto simpleObject = 2;
    CustomObjectProto customObject = 3;
  }
}

message RecordObjectProto {
  string key = 1;
}

message RecordObject2Proto {
  string key = 1;
  int32 value = 2;
}

message InterfaceObjectProto {
  oneof obj {
    RecordObjectProto recordObject = 1;
    RecordObject2Proto recordObject2 = 2;
  }
}
```
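The `AbstractProto` oneof above is what `generateAbstract()` dispatches over. A hand-written sketch of the equivalent deserialize dispatch (an assumption drawn from the generator and tests, not actual generated output; the generated bean @Injects the external `CustomObjectSerializer`, instantiated directly here for brevity):

```java
package com.usatiuk.autoprotomap.it;

// Illustrative equivalent of the generated oneof dispatch for AbstractProto.
final class AbstractProtoDispatchSketch {
    private final CustomObjectSerializer customSerializer = new CustomObjectSerializer();

    AbstractObject deserialize(AbstractProto message) {
        if (message.hasNestedObject()) {
            var n = message.getNestedObject();
            var s = n.getObject();
            return new NestedObject(
                    new SimpleObject(s.getNumfield(), s.getName(), s.getSomeBytes()),
                    n.getNestedName(), n.getNestedSomeBytes());
        }
        if (message.hasSimpleObject()) {
            var s = message.getSimpleObject();
            return new SimpleObject(s.getNumfield(), s.getName(), s.getSomeBytes());
        }
        if (message.hasCustomObject()) {
            // External (hand-written) serializer takes over for this case.
            return customSerializer.deserialize(message.getCustomObject());
        }
        throw new IllegalArgumentException("Unknown object type");
    }
}
```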
@@ -1 +0,0 @@ (deleted file)

```properties
quarkus.package.jar.decompiler.enabled=true
```
@@ -1,7 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import io.quarkus.test.junit.QuarkusIntegrationTest;

@QuarkusIntegrationTest
public class AutoprotomapResourceIT extends AutoprotomapResourceTest {
}
```
@@ -1,113 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.it;

import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.test.junit.QuarkusTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;


@QuarkusTest
public class AutoprotomapResourceTest {
    @Inject
    ProtoSerializer<SimpleObjectProto, SimpleObject> simpleProtoSerializer;
    @Inject
    ProtoSerializer<NestedObjectProto, NestedObject> nestedProtoSerializer;
    @Inject
    ProtoSerializer<AbstractProto, AbstractObject> abstractProtoSerializer;
    @Inject
    ProtoSerializer<InterfaceObjectProto, InterfaceObject> interfaceProtoSerializer;

    @Test
    public void testSimple() {
        var ret = simpleProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
        Assertions.assertEquals(1234, ret.getNumfield());
        Assertions.assertEquals("simple test", ret.getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSomeBytes());

        var des = simpleProtoSerializer.deserialize(ret);
        Assertions.assertEquals(1234, des.getNumfield());
        Assertions.assertEquals("simple test", des.getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
    }

    @Test
    public void testNested() {
        var ret = nestedProtoSerializer.serialize(
                new NestedObject(
                        new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
                        "nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
        Assertions.assertEquals(333, ret.getObject().getNumfield());
        Assertions.assertEquals("nested so", ret.getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", ret.getNestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedSomeBytes());

        var des = nestedProtoSerializer.deserialize(ret);
        Assertions.assertEquals(333, des.object.numfield);
        Assertions.assertEquals(333, des.getObject().getNumfield());
        Assertions.assertEquals("nested so", des.getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", des.get_nestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
    }

    @Test
    public void testAbstractSimple() {
        var ret = abstractProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
        Assertions.assertEquals(1234, ret.getSimpleObject().getNumfield());
        Assertions.assertEquals("simple test", ret.getSimpleObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSimpleObject().getSomeBytes());

        var des = (SimpleObject) abstractProtoSerializer.deserialize(ret);
        Assertions.assertEquals(1234, des.getNumfield());
        Assertions.assertEquals("simple test", des.getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
    }

    @Test
    public void testAbstractCustom() {
        var ret = abstractProtoSerializer.serialize(new CustomObject(1234));
        Assertions.assertEquals(1, ret.getCustomObject().getTest());

        var des = (CustomObject) abstractProtoSerializer.deserialize(ret);
        Assertions.assertEquals(2, des.getTestNum());
    }

    @Test
    public void testAbstractNested() {
        var ret = abstractProtoSerializer.serialize(
                new NestedObject(
                        new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
                        "nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
        Assertions.assertEquals(333, ret.getNestedObject().getObject().getNumfield());
        Assertions.assertEquals("nested so", ret.getNestedObject().getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getNestedObject().getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", ret.getNestedObject().getNestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedObject().getNestedSomeBytes());

        var des = (NestedObject) abstractProtoSerializer.deserialize(ret);
        Assertions.assertEquals(333, des.object.numfield);
        Assertions.assertEquals(333, des.getObject().getNumfield());
        Assertions.assertEquals("nested so", des.getObject().getName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
        Assertions.assertEquals("nested obj", des.get_nestedName());
        Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
    }

    @Test
    public void testInterface() {
        var ret = interfaceProtoSerializer.serialize(new RecordObject("record test"));
        Assertions.assertEquals("record test", ret.getRecordObject().getKey());
        var des = (RecordObject) interfaceProtoSerializer.deserialize(ret);
        Assertions.assertEquals("record test", des.key());

        var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234));
        Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey());
        Assertions.assertEquals(1234, ret2.getRecordObject2().getValue());
        var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2);
        Assertions.assertEquals("record test 2", des2.key());
        Assertions.assertEquals(1234, des2.value());
    }
}
```
@@ -1,24 +0,0 @@ (deleted file)

```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk.dhfs</groupId>
        <artifactId>parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <groupId>com.usatiuk</groupId>
    <artifactId>autoprotomap-parent</artifactId>
    <version>1.0-SNAPSHOT</version>
    <packaging>pom</packaging>
    <name>Autoprotomap - Parent</name>

    <modules>
        <module>deployment</module>
        <module>runtime</module>
        <module>integration-tests</module>
    </modules>

</project>
```
@@ -1,63 +0,0 @@ (deleted file)

```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.usatiuk</groupId>
        <artifactId>autoprotomap-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <artifactId>autoprotomap</artifactId>
    <name>Autoprotomap - Runtime</name>

    <dependencies>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>io.quarkus</groupId>
                <artifactId>quarkus-extension-maven-plugin</artifactId>
                <version>${quarkus.platform.version}</version>
                <executions>
                    <execution>
                        <phase>compile</phase>
                        <goals>
                            <goal>extension-descriptor</goal>
                        </goals>
                        <configuration>
                            <deployment>${project.groupId}:${project.artifactId}-deployment:${project.version}
                            </deployment>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <executions>
                    <execution>
                        <id>default-compile</id>
                        <configuration>
                            <annotationProcessorPaths>
                                <path>
                                    <groupId>io.quarkus</groupId>
                                    <artifactId>quarkus-extension-processor</artifactId>
                                    <version>${quarkus.platform.version}</version>
                                </path>
                            </annotationProcessorPaths>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
```
@@ -1,12 +0,0 @@ (deleted file)

```java
package com.usatiuk.autoprotomap.runtime;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.CLASS)
@Target(ElementType.TYPE)
public @interface ProtoMirror {
    Class<?> value() default Object.class;
}
```
@@ -1,9 +0,0 @@ (deleted file)

```yaml
name: Autoprotomap
#description: Do something useful.
metadata:
#  keywords:
#    - autoprotomap
#  guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension
#  categories:
#    - "miscellaneous"
#  status: "preview"
```
215
dhfs-parent/dhfs-app/pom.xml
Normal file
215
dhfs-parent/dhfs-app/pom.xml
Normal file
@@ -0,0 +1,215 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>dhfs-app</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
|
||||
<parent>
|
||||
<groupId>com.usatiuk.dhfs</groupId>
|
||||
<artifactId>parent</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.testcontainers</groupId>
|
||||
<artifactId>testcontainers</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.awaitility</groupId>
|
||||
<artifactId>awaitility</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bcprov-jdk18on</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bcpkix-jdk18on</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-security</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>net.openhft</groupId>
|
||||
<artifactId>zero-allocation-hashing</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-grpc</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-arc</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-rest</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-rest-client</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-rest-client-jsonb</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-rest-jsonb</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-scheduler</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.quarkus</groupId>
|
||||
<artifactId>quarkus-junit5</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.SerCeMan</groupId>
|
||||
<artifactId>jnr-fuse</artifactId>
|
||||
<version>44ed40f8ce</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-ffi</artifactId>
|
||||
<version>2.2.16</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-posix</artifactId>
|
||||
<version>3.1.19</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.jnr</groupId>
|
||||
<artifactId>jnr-constants</artifactId>
|
||||
<version>0.10.4</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-lang3</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.jboss.slf4j</groupId>
|
||||
<artifactId>slf4j-jboss-logmanager</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
            <groupId>commons-codec</groupId>
            <artifactId>commons-codec</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
        </dependency>
        <dependency>
            <groupId>org.pcollections</groupId>
            <artifactId>pcollections</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-math3</artifactId>
            <version>3.6.1</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>kleppmanntree</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>supportlib</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>objects</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>dhfs-fs</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>dhfs-fuse</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>sync-base</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>utils</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <configuration>
                    <forkCount>1C</forkCount>
                    <reuseForks>false</reuseForks>
                    <parallel>classes</parallel>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            false
                        </junit.jupiter.execution.parallel.enabled>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-failsafe-plugin</artifactId>
                <configuration>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            true
                        </junit.jupiter.execution.parallel.enabled>
                        <junit.jupiter.execution.parallel.mode.default>
                            concurrent
                        </junit.jupiter.execution.parallel.mode.default>
                        <junit.jupiter.execution.parallel.config.dynamic.factor>
                            0.5
                        </junit.jupiter.execution.parallel.config.dynamic.factor>
                        <junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
                        <junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
                <groupId>${quarkus.platform.group-id}</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
                <version>${quarkus.platform.version}</version>
                <extensions>true</extensions>
                <executions>
                    <execution>
                        <id>quarkus-plugin</id>
                        <goals>
                            <goal>build</goal>
                            <goal>generate-code</goal>
                            <goal>generate-code-tests</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
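A note on the build configuration above: surefire runs unit tests in one forked JVM per core (forkCount 1C) with JUnit's own parallelism switched off, while failsafe keeps a single JVM and lets JUnit 5 schedule integration tests concurrently (parallel.mode.default=concurrent, dynamic factor 0.5, i.e. roughly half the available cores). Under that default, any two tests touching shared state need an explicit lock. A minimal sketch with the standard JUnit 5 API (the class and resource names below are hypothetical, not taken from this repository):

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.parallel.ResourceLock;

public class SharedStateIT {
    // @ResourceLock serializes these two tests against each other even when
    // the failsafe configuration above schedules test methods concurrently.
    @Test
    @ResourceLock("peer-registry") // hypothetical resource name
    void writerTest() { /* mutate the shared resource */ }

    @Test
    @ResourceLock("peer-registry")
    void readerTest() { /* read the shared resource */ }
}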
@@ -1,4 +1,4 @@
package com.usatiuk.dhfs;
package com.usatiuk.dhfs.app;

import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;
@@ -1,10 +1,10 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=5s
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=1
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
@@ -15,26 +15,20 @@ dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
# Writes strictly smaller than this will try to merge with blocks nearby
dhfs.files.write_merge_threshold=0.8
# If a merge would result in a block of greater size than this, stop merging
dhfs.files.write_merge_limit=1.2
# Don't take blocks of this size and above when merging
dhfs.files.write_merge_max_chunk_to_take=1
dhfs.files.write_last_chunk_limit=1.5
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=2
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=4
dhfs.objects.ref-processor.threads=4
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=5000
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
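The dhfs.files.* knobs above read as ratios of target_chunk_size (the inline comments describe merging writes "strictly smaller" than a threshold and capping merged blocks at a limit), and target_chunk_alignment=19 looks like a power-of-two exponent. A back-of-the-envelope sketch of the resulting byte values, under the assumption that the ratios are multiplied by the target chunk size, which the surrounding comments suggest but the diff does not state outright:

public class ChunkKnobs {
    public static void main(String[] args) {
        long target = 2097152L;                  // dhfs.files.target_chunk_size, 2 MiB
        long mergeBelow = (long) (0.8 * target); // write_merge_threshold -> 1677721 B
        long mergeCap = (long) (1.2 * target);   // write_merge_limit    -> 2516582 B
        long align = 1L << 19;                   // target_chunk_alignment=19 -> 512 KiB
        System.out.printf("merge writes under %d B; stop merging past %d B; align chunks to %d B%n",
                mergeBelow, mergeCap, align);
    }
}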
@@ -0,0 +1,44 @@
package com.usatiuk.dhfs;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;

@ApplicationScoped
public class TestDataCleaner {
    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
    String tempDirectory;

    void init(@Observes @Priority(1) StartupEvent event) throws IOException {
        try {
            purgeDirectory(Path.of(tempDirectory).toFile());
        } catch (Exception ignored) {
            Log.warn("Couldn't cleanup test data on init");
        }
    }

    void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
        purgeDirectory(Path.of(tempDirectory).toFile());
    }

    public static void purgeDirectory(File dir) {
        try {
            for (File file : Objects.requireNonNull(dir.listFiles())) {
                if (file.isDirectory())
                    purgeDirectory(file);
                file.delete();
            }
        } catch (Exception e) {
            Log.error("Couldn't purge directory " + dir, e);
        }
    }
}
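purgeDirectory above recurses with File.listFiles and ignores the boolean result of delete(). An equivalent sketch with java.nio that deletes depth-first and surfaces failures instead of swallowing them, keeping the root directory itself just like purgeDirectory does:

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public class NioPurge {
    static void purge(Path root) throws IOException {
        if (!Files.exists(root)) return;
        try (Stream<Path> walk = Files.walk(root)) {
            walk.sorted(Comparator.reverseOrder()) // children before parents
                .filter(p -> !p.equals(root))      // keep the root dir itself
                .forEach(p -> {
                    try {
                        Files.delete(p);           // throws instead of returning false
                    } catch (IOException e) {
                        throw new UncheckedIOException(e);
                    }
                });
        }
    }
}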
@@ -53,8 +53,8 @@ public class DhfsFuseIT {
        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName());
        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));

        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();

        Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
        Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
@@ -66,13 +66,13 @@ public class DhfsFuseIT {
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{\"uuid\":\"" + c2uuid + "\"}' " +
                        " http://localhost:8080/objects-manage/known-peers");
                        " http://localhost:8080/peers-manage/known-peers");

        var c2curl = container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{\"uuid\":\"" + c1uuid + "\"}' " +
                        " http://localhost:8080/objects-manage/known-peers");
                        " http://localhost:8080/peers-manage/known-peers");

        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
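The peering handshake in this setup is plain HTTP: each node PUTs the other node's {"uuid": ...} to /peers-manage/known-peers on its own management port. The same call expressed with java.net.http instead of shelling out to curl (a sketch; only the endpoint shape visible in the curl commands above is assumed):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PeerRegistration {
    static int addKnownPeer(String host, String peerUuid) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://" + host + ":8080/peers-manage/known-peers"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"uuid\":\"" + peerUuid + "\"}"))
                .build();
        // Returns the HTTP status code; the tests above only watch the logs afterwards.
        return client.send(request, HttpResponse.BodyHandlers.ofString()).statusCode();
    }
}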
@@ -85,111 +85,111 @@ public class DhfsFuseIT {

    @Test
    void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
    }

    @Test
    void readWriteRewriteFileTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
    }

    @Test
    void createDelayedTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        var client = DockerClientFactory.instance().client();
        client.pauseContainerCmd(container2.getContainerId()).exec();

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);

        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /root/dhfs_default/fuse/testf2").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /dhfs_test/fuse/testf2").getExitCode());

        client.unpauseContainerCmd(container2.getContainerId()).exec();

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
                "newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
                "newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
    }

    @Test
    void writeRewriteDelayedTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        var client = DockerClientFactory.instance().client();
        client.pauseContainerCmd(container2.getContainerId()).exec();

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);

        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());

        client.unpauseContainerCmd(container2.getContainerId()).exec();

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
    }

    // TODO: How this fits with the tree?
    @Test
    @Disabled
    void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        var client = DockerClientFactory.instance().client();
        client.pauseContainerCmd(container2.getContainerId()).exec();

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);

        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1);

        client.unpauseContainerCmd(container2.getContainerId()).exec();

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1);
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);

        await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
    }

    @Test
    void deleteTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
                "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        Log.info("Deleting");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
                0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
        Log.info("Deleted");

        // FIXME?
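The delayed-sync tests above all follow one pattern: pause a container at the Docker level (so its peer sees a dropped connection rather than a clean shutdown), mutate the filesystem on the survivor, unpause, and wait for the "Connected" log line before asserting convergence. A helper capturing that shape (a sketch; the tests themselves inline these calls directly):

import com.github.dockerjava.api.DockerClient;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;

public class PartitionHelper {
    // Runs `action` while `victim` is frozen, then lets it reconnect.
    static void whilePartitioned(GenericContainer<?> victim, Runnable action) {
        DockerClient client = DockerClientFactory.instance().client();
        client.pauseContainerCmd(victim.getContainerId()).exec();
        try {
            action.run(); // e.g. write or delete files on the still-running peer
        } finally {
            client.unpauseContainerCmd(victim.getContainerId()).exec();
        }
    }
}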
@@ -197,83 +197,83 @@ public class DhfsFuseIT {
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);

        await().atMost(45, TimeUnit.SECONDS).until(() ->
                1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
                1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() ->
                1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
                1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
    }

    @Test
    void moveFileTest() throws IOException, InterruptedException, TimeoutException {
        Log.info("Creating");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        Log.info("Listing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
        Log.info("Moving");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testf1 /root/dhfs_default/fuse/testf2").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testf1 /dhfs_test/fuse/testf2").getExitCode());
        Log.info("Listing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
        Log.info("Reading");
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
    }

    @Test
    void moveDirTest() throws IOException, InterruptedException, TimeoutException {
        Log.info("Creating");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testdir/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testdir").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testdir/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testdir/testf1").getStdout()));
        Log.info("Listing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
        Log.info("Moving");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir2").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testdir /root/dhfs_default/fuse/testdir2/testdirm").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testdir2").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testdir /dhfs_test/fuse/testdir2/testdirm").getExitCode());
        Log.info("Listing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
        Log.info("Reading");
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir2/testdirm/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testdir2/testdirm/testf1").getStdout()));
    }


    // TODO: This probably shouldn't be working right now
    @Test
    void removeAddHostTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        var c2curl = container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request DELETE " +
                        " --data '{\"uuid\":\"" + c1uuid + "\"}' " +
                        " http://localhost:8080/objects-manage/known-peers");
                        " http://localhost:8080/peers-manage/known-peers");

        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /root/dhfs_default/fuse/newfile1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /root/dhfs_default/fuse/newfile1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /dhfs_test/fuse/newfile1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /dhfs_test/fuse/newfile1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /root/dhfs_default/fuse/newfile2").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /root/dhfs_default/fuse/newfile2").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /dhfs_test/fuse/newfile2").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /dhfs_test/fuse/newfile2").getExitCode());

        Log.info("Re-adding");
        container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{\"uuid\":\"" + c1uuid + "\"}' " +
                        " http://localhost:8080/objects-manage/known-peers");
                        " http://localhost:8080/peers-manage/known-peers");
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

        await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            Log.info("Listing removeAddHostTest");
            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/");
            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/");
            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
            Log.info(cat1);
            Log.info(cat2);
            Log.info(ls1);
@@ -286,10 +286,10 @@ public class DhfsFuseIT {

    @Test
    void dirConflictTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
        boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /root/dhfs_default/fuse/testf"),
                Pair.of(container2, "echo test2 >> /root/dhfs_default/fuse/testf")).parallel().map(p -> {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
        boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /dhfs_test/fuse/testf"),
                Pair.of(container2, "echo test2 >> /dhfs_test/fuse/testf")).parallel().map(p -> {
            try {
                return p.getLeft().execInContainer("/bin/sh", "-c", p.getRight()).getExitCode();
            } catch (Exception e) {
@@ -298,8 +298,8 @@ public class DhfsFuseIT {
        }).anyMatch(r -> r != 0);
        Assumptions.assumeTrue(!createFail, "Failed creating one or more files");
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            var ls = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
            var cat = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
            var ls = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
            var cat = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            Log.info(ls);
            Log.info(cat);
            return cat.getStdout().contains("test1") && cat.getStdout().contains("test2");
@@ -308,38 +308,38 @@ public class DhfsFuseIT {

    @Test
    void dirCycleTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/a").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/b").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /root/dhfs_default/fuse/a/testfa").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /root/dhfs_default/fuse/b/testfb").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /root/dhfs_default/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/b").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /dhfs_test/fuse/a/testfa").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /dhfs_test/fuse/b/testfb").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /dhfs_test/fuse").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            var c2ls = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f -exec cat {} \\;");
            var c2ls = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f -exec cat {} \\;");
            return c2ls.getExitCode() == 0 && c2ls.getStdout().contains("xqr489") && c2ls.getStdout().contains("ahinou");
        });

        var client = DockerClientFactory.instance().client();
        client.pauseContainerCmd(container1.getContainerId()).exec();
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/a /root/dhfs_default/fuse/b").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/a /dhfs_test/fuse/b").getExitCode());
        client.pauseContainerCmd(container2.getContainerId()).exec();
        client.unpauseContainerCmd(container1.getContainerId()).exec();
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/b /root/dhfs_default/fuse/a").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/b /dhfs_test/fuse/a").getExitCode());
        client.unpauseContainerCmd(container2.getContainerId()).exec();


        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            Log.info("Listing dirCycleTest");
            Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"));
            Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a"));
            Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b"));
            Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"));
            Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a"));
            Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b"));
            Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse"));
            Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/a"));
            Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/b"));
            Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse"));
            Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/a"));
            Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/b"));

            var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;");
            var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -maxdepth 3 -type f -exec cat {} \\;");
            Log.info(c1ls2);
            var c2ls2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;");
            var c2ls2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -maxdepth 3 -type f -exec cat {} \\;");
            Log.info(c2ls2);

            return c1ls2.getStdout().contains("xqr489") && c1ls2.getStdout().contains("ahinou")
@@ -349,4 +349,82 @@ public class DhfsFuseIT {

    }

    @Test
    void removeAndMove() throws IOException, InterruptedException, TimeoutException {
        var client = DockerClientFactory.instance().client();
        Log.info("Creating");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        Log.info("Listing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        client.pauseContainerCmd(container1.getContainerId()).exec();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);

        Log.info("Removing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());

        client.pauseContainerCmd(container2.getContainerId()).exec();
        client.unpauseContainerCmd(container1.getContainerId()).exec();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
        Log.info("Moving");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testf1 /dhfs_test/fuse/testf2").getExitCode());
        Log.info("Listing");
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
        Log.info("Reading");
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
        client.unpauseContainerCmd(container2.getContainerId()).exec();

        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
        // Either removed, or moved
        // TODO: it always seems to be removed?
        Log.info("Reading both");
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
            Log.info("cat1: " + cat1);
            Log.info("cat2: " + cat2);
            Log.info("ls1: " + ls1);
            Log.info("ls2: " + ls2);

            if (!ls1.getStdout().equals(ls2.getStdout())) {
                Log.info("Different ls?");
                return false;
            }

            if (ls1.getStdout().trim().isEmpty() && ls2.getStdout().trim().isEmpty()) {
                Log.info("Both empty");
                return true;
            }

            if (!cat1.getStdout().equals(cat2.getStdout())) {
                Log.info("Different cat?");
                return false;
            }

            if (!(cat1.getExitCode() == 0 && cat2.getExitCode() == 0 && ls1.getExitCode() == 0 && ls2.getExitCode() == 0)) {
                return false;
            }

            boolean hasMoved = cat1.getStdout().contains("tesempty") && cat2.getStdout().contains("tesempty")
                    && ls1.getStdout().contains("testf2") && !ls1.getStdout().contains("testf1")
                    && ls2.getStdout().contains("testf2") && !ls2.getStdout().contains("testf1");

            boolean removed = !cat1.getStdout().contains("tesempty") && !cat2.getStdout().contains("tesempty")
                    && !ls1.getStdout().contains("testf2") && !ls1.getStdout().contains("testf1")
                    && !ls2.getStdout().contains("testf2") && !ls2.getStdout().contains("testf1");

            if (hasMoved && removed) {
                Log.info("Both removed and moved");
                return false;
            }

            return hasMoved || removed;
        });
    }

}
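removeAndMove above accepts two convergent outcomes: both replicas end up with the moved file and its old content, or both end up with nothing. The acceptance lambda can be read as a three-way classification; a condensed sketch of the same logic (class and method names hypothetical):

public class ConvergenceCheck {
    enum Outcome { MOVED, REMOVED, DIVERGED }

    // ls1/ls2 and cat1/cat2 are the stdout of `ls` and `cat` on the two replicas.
    static Outcome classify(String ls1, String ls2, String cat1, String cat2) {
        if (!ls1.equals(ls2) || !cat1.equals(cat2)) return Outcome.DIVERGED;
        if (ls1.trim().isEmpty()) return Outcome.REMOVED;
        boolean moved = ls1.contains("testf2") && !ls1.contains("testf1") && cat1.contains("tesempty");
        return moved ? Outcome.MOVED : Outcome.DIVERGED;
    }
}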
@@ -59,9 +59,9 @@ public class DhfsFusex3IT {

        Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::start);

        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
        c3uuid = container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
        c3uuid = container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();

        Log.info(container1.getContainerId() + "=" + c1uuid);
        Log.info(container2.getContainerId() + "=" + c2uuid);
@@ -92,25 +92,25 @@ public class DhfsFusex3IT {
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{\"uuid\":\"" + c2uuid + "\"}' " +
                        " http://localhost:8080/objects-manage/known-peers");
                        " http://localhost:8080/peers-manage/known-peers");

        var c2curl1 = container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{\"uuid\":\"" + c1uuid + "\"}' " +
                        " http://localhost:8080/objects-manage/known-peers");
                        " http://localhost:8080/peers-manage/known-peers");

        var c2curl3 = container2.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{\"uuid\":\"" + c3uuid + "\"}' " +
                        " http://localhost:8080/objects-manage/known-peers");
                        " http://localhost:8080/peers-manage/known-peers");

        var c3curl = container3.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request PUT " +
                        " --data '{\"uuid\":\"" + c2uuid + "\"}' " +
                        " http://localhost:8080/objects-manage/known-peers");
                        " http://localhost:8080/peers-manage/known-peers");

        waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
@@ -119,8 +119,8 @@ public class DhfsFusex3IT {

    private boolean checkEmpty() throws IOException, InterruptedException {
        for (var container : List.of(container1, container2, container3)) {
            var found = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs -type f");
            var foundWc = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs -type f | wc -l");
            var found = container.execInContainer("/bin/sh", "-c", "find /dhfs_test/data/objs -type f");
            var foundWc = container.execInContainer("/bin/sh", "-c", "find /dhfs_test/data/objs -type f | wc -l");
            Log.info("Remaining objects in " + container.getContainerId() + ": " + found.toString() + " " + foundWc.toString());
            if (!(found.getExitCode() == 0 && foundWc.getExitCode() == 0 && Integer.parseInt(foundWc.getStdout().strip()) == emptyFileCount))
                return false;
@@ -135,44 +135,47 @@ public class DhfsFusex3IT {

    @Test
    void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
    }

    // FIXME:
    @Test
    @Disabled
    void largerFileDeleteTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /dhfs_test/fuse && dd if=/dev/urandom of=10MB.bin bs=1M count=10").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /dhfs_test/fuse/10MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/10MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty());
    }

    @Test
    @Disabled
    void largerFileDeleteTestNoDelays() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /dhfs_test/fuse && dd if=/dev/urandom of=10MB.bin bs=1M count=10").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /dhfs_test/fuse/10MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/10MB.bin").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty());
    }

    @Test
    void gccHelloWorldTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo '#include<stdio.h>\nint main(){printf(\"hello world\"); return 0;}' > /root/dhfs_default/fuse/hello.c").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && gcc hello.c").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo '#include<stdio.h>\nint main(){printf(\"hello world\"); return 0;}' > /dhfs_test/fuse/hello.c").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /dhfs_test/fuse && gcc hello.c").getExitCode());

        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            var helloOut = container1.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out");
            var helloOut = container1.execInContainer("/bin/sh", "-c", "/dhfs_test/fuse/a.out");
            Log.info(helloOut);
            return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world");
        });
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            var helloOut = container2.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out");
            var helloOut = container2.execInContainer("/bin/sh", "-c", "/dhfs_test/fuse/a.out");
            Log.info(helloOut);
            return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world");
        });
        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            var helloOut = container3.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out");
            var helloOut = container3.execInContainer("/bin/sh", "-c", "/dhfs_test/fuse/a.out");
            Log.info(helloOut);
            return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world");
        });
@@ -180,20 +183,22 @@ public class DhfsFusex3IT {

    @Test
    void removeHostTest() throws IOException, InterruptedException, TimeoutException {
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));

        var c3curl = container3.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\" " +
                        " --request DELETE " +
                        " --data '{\"uuid\":\"" + c2uuid + "\"}' " +
                        " http://localhost:8080/objects-manage/known-peers");
                        " http://localhost:8080/peers-manage/known-peers");

        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
        Thread.sleep(10000);

        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
    }

    @Test
@@ -203,15 +208,15 @@ public class DhfsFusex3IT {
        client.pauseContainerCmd(container2.getContainerId()).exec();
        // Pauses needed as otherwise docker buffers some incoming packets
        waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
        client.pauseContainerCmd(container3.getContainerId()).exec();
        client.unpauseContainerCmd(container2.getContainerId()).exec();
        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
        client.pauseContainerCmd(container2.getContainerId()).exec();
        client.unpauseContainerCmd(container1.getContainerId()).exec();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode());
        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());
        client.unpauseContainerCmd(container2.getContainerId()).exec();
        client.unpauseContainerCmd(container3.getContainerId()).exec();
        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
@@ -220,8 +225,8 @@ public class DhfsFusex3IT {

        await().atMost(45, TimeUnit.SECONDS).until(() -> {
            for (var c : List.of(container1, container2, container3)) {
                var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
                var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
                var ls = c.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
                var cat = c.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
                Log.info(ls);
                Log.info(cat);
                if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3")))
@@ -231,35 +236,35 @@ public class DhfsFusex3IT {
|
||||
});
|
||||
|
||||
await().atMost(45, TimeUnit.SECONDS).until(() -> {
|
||||
return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
|
||||
container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
|
||||
container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
|
||||
container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
|
||||
container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals(
|
||||
container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout());
|
||||
return container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout().equals(
|
||||
container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout()) &&
|
||||
container3.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout().equals(
|
||||
container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout()) &&
|
||||
container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*").getStdout().equals(
|
||||
container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*").getStdout());
|
||||
});
|
||||
}
|
||||
|
||||
     @Test
     void fileConflictTest() throws IOException, InterruptedException, TimeoutException {
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf").getExitCode());
-        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout()));
-        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout()));
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf").getExitCode());
+        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));
+        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));

         var client = DockerClientFactory.instance().client();
         client.pauseContainerCmd(container1.getContainerId()).exec();
         client.pauseContainerCmd(container2.getContainerId()).exec();
         // Pauses needed as otherwise docker buffers some incoming packets
         waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode());
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
         client.pauseContainerCmd(container3.getContainerId()).exec();
         client.unpauseContainerCmd(container2.getContainerId()).exec();
         waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode());
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
         client.pauseContainerCmd(container2.getContainerId()).exec();
         client.unpauseContainerCmd(container1.getContainerId()).exec();
         waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode());
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());
         client.unpauseContainerCmd(container2.getContainerId()).exec();
         client.unpauseContainerCmd(container3.getContainerId()).exec();
         Log.warn("Waiting for connections");
@@ -268,12 +273,29 @@ public class DhfsFusex3IT {
         waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
         Log.warn("Connected");

+        // TODO: There's some issue with cache, so avoid file reads
+        await().atMost(45, TimeUnit.SECONDS).until(() -> {
+            Log.info("Listing consistency 1");
+            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var ls3 = container3.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            Log.info(ls1);
+            Log.info(ls2);
+            Log.info(ls3);
+
+            return (ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && ls3.getExitCode() == 0)
+                    && (ls1.getStdout().equals(ls2.getStdout()) && ls2.getStdout().equals(ls3.getStdout()));
+        });
+
         await().atMost(45, TimeUnit.SECONDS).until(() -> {
+            Log.info("Listing");
             for (var c : List.of(container1, container2, container3)) {
-                var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
-                var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
+                var ls = c.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+                var cat = c.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
                 Log.info(ls);
                 Log.info(cat);
+                if (!(cat.getExitCode() == 0 && ls.getExitCode() == 0))
+                    return false;
                 if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3")))
                     return false;
             }
@@ -281,12 +303,24 @@ public class DhfsFusex3IT {
         });

         await().atMost(45, TimeUnit.SECONDS).until(() -> {
-            return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
-                    container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
-                    container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
-                            container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
-                    container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals(
-                            container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout());
+            Log.info("Listing consistency");
+            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+            var ls3 = container3.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var cat3 = container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+            Log.info(ls1);
+            Log.info(cat1);
+            Log.info(ls2);
+            Log.info(cat2);
+            Log.info(ls3);
+            Log.info(cat3);
+
+            return (ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && ls3.getExitCode() == 0)
+                    && (cat1.getExitCode() == 0 && cat2.getExitCode() == 0 && cat3.getExitCode() == 0)
+                    && (cat1.getStdout().equals(cat2.getStdout()) && cat2.getStdout().equals(cat3.getStdout()))
+                    && (ls1.getStdout().equals(ls2.getStdout()) && ls2.getStdout().equals(ls3.getStdout()));
         });
     }

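Note on the pattern above: the pause/unpause choreography creates a temporary network partition. A paused container stops consuming packets entirely, so its peers observe a real "Lost connection to" event instead of Docker quietly buffering the traffic. A minimal sketch of that pattern as a reusable helper (hypothetical, not part of this change; it assumes the same docker-java and Testcontainers types used in the tests):

    // Sketch only: freeze one peer, run writes on the others, then let it resync.
    static void whilePartitioned(com.github.dockerjava.api.DockerClient client,
                                 org.testcontainers.containers.GenericContainer<?> frozen,
                                 Runnable action) {
        client.pauseContainerCmd(frozen.getContainerId()).exec(); // peer stops responding entirely
        try {
            action.run(); // perform writes on the still-running peers
        } finally {
            client.unpauseContainerCmd(frozen.getContainerId()).exec(); // reconnect and resync
        }
    }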
@@ -83,7 +83,11 @@ public class DhfsImage implements Future<String> {
                 "-Dquarkus.log.category.\"com.usatiuk\".level=TRACE",
                 "-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE",
                 "-Ddhfs.objects.periodic-push-op-interval=5s",
+                "-Ddhfs.fuse.root=/dhfs_test/fuse",
+                "-Ddhfs.objects.persistence.files.root=/dhfs_test/data",
+                "-Ddhfs.objects.persistence.stuff.root=/dhfs_test/data/stuff",
                 "-jar", "/app/quarkus-run.jar")
+                .run("mkdir /dhfs_test && chmod 777 /dhfs_test")
                 .build())
                 .withFileFromPath("/app", Paths.get(buildPath, "quarkus-app"))
                 .withFileFromPath("/libs", Paths.get(nativeLibsDirectory));
@@ -0,0 +1,182 @@
+package com.usatiuk.dhfs.integration;
+
+import com.github.dockerjava.api.model.Device;
+import com.usatiuk.dhfs.TestDataCleaner;
+import io.quarkus.logging.Log;
+import org.junit.jupiter.api.*;
+import org.slf4j.LoggerFactory;
+import org.testcontainers.DockerClientFactory;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.Network;
+import org.testcontainers.containers.output.Slf4jLogConsumer;
+import org.testcontainers.containers.output.WaitingConsumer;
+import org.testcontainers.containers.wait.strategy.Wait;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.time.Duration;
+import java.util.Objects;
+import java.util.UUID;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Stream;
+
+import static org.awaitility.Awaitility.await;
+
+public class KillIT {
+    GenericContainer<?> container1;
+    GenericContainer<?> container2;
+
+    WaitingConsumer waitingConsumer1;
+    WaitingConsumer waitingConsumer2;
+
+    String c1uuid;
+    String c2uuid;
+
+    File data1;
+    File data2;
+
+    @BeforeEach
+    void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
+        data1 = Files.createTempDirectory("").toFile();
+        data2 = Files.createTempDirectory("").toFile();
+
+        Network network = Network.newNetwork();
+
+        container1 = new GenericContainer<>(DhfsImage.getInstance())
+                .withPrivilegedMode(true)
+                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
+                .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
+                .withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
+        container2 = new GenericContainer<>(DhfsImage.getInstance())
+                .withPrivilegedMode(true)
+                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
+                .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
+                .withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
+
+        Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
+
+        waitingConsumer1 = new WaitingConsumer();
+        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
+        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
+        waitingConsumer2 = new WaitingConsumer();
+        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
+        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
+
+        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
+        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
+
+        Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
+        Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
+
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
+
+        var c1curl = container1.execInContainer("/bin/sh", "-c",
+                "curl --header \"Content-Type: application/json\" " +
+                        "  --request PUT " +
+                        "  --data '{\"uuid\":\"" + c2uuid + "\"}' " +
+                        "  http://localhost:8080/peers-manage/known-peers");
+
+        var c2curl = container2.execInContainer("/bin/sh", "-c",
+                "curl --header \"Content-Type: application/json\" " +
+                        "  --request PUT " +
+                        "  --data '{\"uuid\":\"" + c1uuid + "\"}' " +
+                        "  http://localhost:8080/peers-manage/known-peers");
+
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+    }
+
+    @AfterEach
+    void stop() {
+        Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
+        TestDataCleaner.purgeDirectory(data1);
+        TestDataCleaner.purgeDirectory(data2);
+    }
+
+    @Test
+    void killTest(TestInfo testInfo) throws Exception {
+        var executor = Executors.newFixedThreadPool(2);
+        var barrier = new CyclicBarrier(2);
+        var ret1 = executor.submit(() -> {
+            try {
+                Log.info("Writing to container 1");
+                barrier.await();
+                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        });
+        barrier.await();
+        Thread.sleep(10000);
+        var client = DockerClientFactory.instance().client();
+        client.killContainerCmd(container1.getContainerId()).exec();
+        container1.stop();
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
+        container1.start();
+        waitingConsumer1 = new WaitingConsumer();
+        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
+        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+
+        await().atMost(45, TimeUnit.SECONDS).until(() -> {
+            Log.info("Listing consistency");
+            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+            Log.info(ls1);
+            Log.info(cat1);
+            Log.info(ls2);
+            Log.info(cat2);
+
+            return ls1.equals(ls2) && cat1.equals(cat2);
+        });
+    }
+
+    @Test
+    void killTestDirs(TestInfo testInfo) throws Exception {
+        var executor = Executors.newFixedThreadPool(2);
+        var barrier = new CyclicBarrier(2);
+        var ret1 = executor.submit(() -> {
+            try {
+                Log.info("Writing to container 1");
+                barrier.await();
+                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        });
+        barrier.await();
+        Thread.sleep(10000);
+        var client = DockerClientFactory.instance().client();
+        client.killContainerCmd(container1.getContainerId()).exec();
+        container1.stop();
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
+        container1.start();
+        waitingConsumer1 = new WaitingConsumer();
+        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
+        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+
+        await().atMost(45, TimeUnit.SECONDS).until(() -> {
+            Log.info("Listing consistency");
+            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+            Log.info(ls1);
+            Log.info(cat1);
+            Log.info(ls2);
+            Log.info(cat2);
+
+            return ls1.equals(ls2) && cat1.equals(cat2);
+        });
+    }
+}
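One thing to watch in KillIT's final assertions: `ls1.equals(ls2)` compares Testcontainers `ExecResult` objects directly. If the `ExecResult` class in the Testcontainers version in use does not override `equals`, that check compares references and can never become true for two separate exec calls. Comparing the interesting fields explicitly is the safer form (a sketch, not part of the diff):

    // Compare outputs field by field instead of relying on ExecResult.equals().
    return ls1.getExitCode() == 0 && ls2.getExitCode() == 0
            && ls1.getStdout().equals(ls2.getStdout())
            && cat1.getStdout().equals(cat2.getStdout());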
@@ -59,77 +59,113 @@ public class ResyncIT {

     @Test
     void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
-        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
-        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
+        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
+        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();

         Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
         Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));

-        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS);
-        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS);
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);

         var c1curl = container1.execInContainer("/bin/sh", "-c",
                 "curl --header \"Content-Type: application/json\" " +
                         "  --request PUT " +
                         "  --data '{\"uuid\":\"" + c2uuid + "\"}' " +
-                        "  http://localhost:8080/objects-manage/known-peers");
+                        "  http://localhost:8080/peers-manage/known-peers");

         var c2curl = container2.execInContainer("/bin/sh", "-c",
                 "curl --header \"Content-Type: application/json\" " +
                         "  --request PUT " +
                         "  --data '{\"uuid\":\"" + c1uuid + "\"}' " +
-                        "  http://localhost:8080/objects-manage/known-peers");
+                        "  http://localhost:8080/peers-manage/known-peers");

         waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
         waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
-        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
+
+        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
     }


     @Test
     void manyFiles() throws IOException, InterruptedException, TimeoutException {
-        var ret = container1.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test$i; done");
+        var ret = container1.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /dhfs_test/fuse/test$i; done");
         Assertions.assertEquals(0, ret.getExitCode());
-        var foundWc = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
+        var foundWc = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
         Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip()));

-        ret = container2.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test-2-$i; done");
+        ret = container2.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /dhfs_test/fuse/test-2-$i; done");
         Assertions.assertEquals(0, ret.getExitCode());
-        foundWc = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
+        foundWc = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
         Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip()));

-        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
-        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
+        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
+        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();

         Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
         Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));

-        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS);
-        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS);
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);

         var c1curl = container1.execInContainer("/bin/sh", "-c",
                 "curl --header \"Content-Type: application/json\" " +
                         "  --request PUT " +
                         "  --data '{\"uuid\":\"" + c2uuid + "\"}' " +
-                        "  http://localhost:8080/objects-manage/known-peers");
+                        "  http://localhost:8080/peers-manage/known-peers");

         var c2curl = container2.execInContainer("/bin/sh", "-c",
                 "curl --header \"Content-Type: application/json\" " +
                         "  --request PUT " +
                         "  --data '{\"uuid\":\"" + c1uuid + "\"}' " +
-                        "  http://localhost:8080/objects-manage/known-peers");
+                        "  http://localhost:8080/peers-manage/known-peers");

         waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
         waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
-        await().atMost(45, TimeUnit.SECONDS).until(() -> {
-            var foundWc2 = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
+        await().atMost(120, TimeUnit.SECONDS).until(() -> {
+            var foundWc2 = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
             return 400 == Integer.valueOf(foundWc2.getStdout().strip());
         });
-        await().atMost(45, TimeUnit.SECONDS).until(() -> {
-            var foundWc2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
+        await().atMost(120, TimeUnit.SECONDS).until(() -> {
+            var foundWc2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
             return 400 == Integer.valueOf(foundWc2.getStdout().strip());
         });
     }

+    @Test
+    void folderAfterMove() throws IOException, InterruptedException, TimeoutException {
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testd1").getExitCode());
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty1 > /dhfs_test/fuse/testd1/testf1").getExitCode());
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testd1 /dhfs_test/fuse/testd2").getExitCode());
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty2 > /dhfs_test/fuse/testd2/testf2").getExitCode());
+
+        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
+        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
+
+        Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
+        Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
+
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
+
+        var c1curl = container1.execInContainer("/bin/sh", "-c",
+                "curl --header \"Content-Type: application/json\" " +
+                        "  --request PUT " +
+                        "  --data '{\"uuid\":\"" + c2uuid + "\"}' " +
+                        "  http://localhost:8080/peers-manage/known-peers");
+
+        var c2curl = container2.execInContainer("/bin/sh", "-c",
+                "curl --header \"Content-Type: application/json\" " +
+                        "  --request PUT " +
+                        "  --data '{\"uuid\":\"" + c1uuid + "\"}' " +
+                        "  http://localhost:8080/peers-manage/known-peers");
+
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+
+        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty1\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testd2/testf1").getStdout()));
+        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty2\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testd2/testf2").getStdout()));
+    }

 }
@@ -31,6 +31,7 @@ services:
       - ./target/quarkus-app:/app
     command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
       --add-exports java.base/jdk.internal.access=ALL-UNNAMED
+      --add-opens=java.base/java.nio=ALL-UNNAMED
       -Ddhfs.objects.persistence.files.root=/dhfs_root/p
       -Ddhfs.objects.root=/dhfs_root/d
      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
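The added `--add-opens=java.base/java.nio=ALL-UNNAMED` flag is the kind of switch typically required by libraries that reach into the JDK's direct-buffer internals reflectively (memory-mapped storage backends commonly do). As an illustration of the underlying cause (an assumption about why the flag is needed here, not code from this repository), the following fails with `InaccessibleObjectException` on JDK 17+ unless `java.nio` is opened to the unnamed module:

    import java.nio.ByteBuffer;

    public class AddOpensDemo {
        public static void main(String[] args) throws Exception {
            var buf = ByteBuffer.allocateDirect(16);
            // Reflective access into java.nio internals, as direct-buffer libraries do.
            var cleaner = buf.getClass().getDeclaredField("cleaner");
            cleaner.setAccessible(true); // throws unless --add-opens java.base/java.nio=ALL-UNNAMED is set
            System.out.println("java.nio is opened: " + cleaner.get(buf));
        }
    }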
@@ -3,8 +3,8 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.usatiuk.dhfs</groupId>
-    <artifactId>server</artifactId>
-    <version>1.0.0-SNAPSHOT</version>
+    <artifactId>dhfs-fs</artifactId>
+    <version>1.0-SNAPSHOT</version>

     <parent>
         <groupId>com.usatiuk.dhfs</groupId>
@@ -23,26 +23,13 @@
             <artifactId>awaitility</artifactId>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>com.usatiuk</groupId>
-            <artifactId>autoprotomap</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk</groupId>
-            <artifactId>autoprotomap-deployment</artifactId>
-            <version>1.0-SNAPSHOT</version>
-            <scope>provided</scope>
-        </dependency>
         <dependency>
             <groupId>org.bouncycastle</groupId>
             <artifactId>bcprov-jdk18on</artifactId>
-            <version>1.78.1</version>
         </dependency>
         <dependency>
             <groupId>org.bouncycastle</groupId>
             <artifactId>bcpkix-jdk18on</artifactId>
-            <version>1.78.1</version>
         </dependency>
         <dependency>
             <groupId>io.quarkus</groupId>
@@ -85,11 +72,6 @@
             <artifactId>quarkus-junit5</artifactId>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>org.projectlombok</groupId>
-            <artifactId>lombok</artifactId>
-            <scope>provided</scope>
-        </dependency>
         <dependency>
             <groupId>com.github.SerCeMan</groupId>
             <artifactId>jnr-fuse</artifactId>
@@ -131,6 +113,10 @@
             <groupId>org.apache.commons</groupId>
             <artifactId>commons-collections4</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.pcollections</groupId>
+            <artifactId>pcollections</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.apache.commons</groupId>
             <artifactId>commons-math3</artifactId>
@@ -151,6 +137,11 @@
             <artifactId>objects</artifactId>
             <version>1.0-SNAPSHOT</version>
         </dependency>
+        <dependency>
+            <groupId>com.usatiuk.dhfs</groupId>
+            <artifactId>sync-base</artifactId>
+            <version>1.0-SNAPSHOT</version>
+        </dependency>
         <dependency>
             <groupId>com.usatiuk.dhfs</groupId>
             <artifactId>utils</artifactId>
@@ -167,6 +158,11 @@
                     <forkCount>1C</forkCount>
                     <reuseForks>false</reuseForks>
                     <parallel>classes</parallel>
+                    <systemPropertyVariables>
+                        <junit.jupiter.execution.parallel.enabled>
+                            false
+                        </junit.jupiter.execution.parallel.enabled>
+                    </systemPropertyVariables>
                 </configuration>
             </plugin>
             <plugin>
@@ -197,7 +193,6 @@
                 <execution>
                     <id>quarkus-plugin</id>
                     <goals>
-                        <goal>build</goal>
                         <goal>generate-code</goal>
                         <goal>generate-code-tests</goal>
                     </goals>
@@ -1,11 +1,11 @@
 package com.usatiuk.dhfs.files.objects;

 import com.google.protobuf.ByteString;
 import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.JDataRemote;
-import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.JDataRemote;
+import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.dhfs.repository.JDataRemoteDto;

-public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote {
+public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
     @Override
     public int estimateSize() {
         return data.size();
@@ -0,0 +1,26 @@
+package com.usatiuk.dhfs.files.objects;
+
+import com.usatiuk.dhfs.ProtoSerializer;
+import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.dhfs.persistence.ChunkDataP;
+import com.usatiuk.dhfs.persistence.JObjectKeyP;
+import jakarta.inject.Singleton;
+
+@Singleton
+public class ChunkDataProtoSerializer implements ProtoSerializer<ChunkDataP, ChunkData> {
+    @Override
+    public ChunkData deserialize(ChunkDataP message) {
+        return new ChunkData(
+                JObjectKey.of(message.getKey().getName()),
+                message.getData()
+        );
+    }
+
+    @Override
+    public ChunkDataP serialize(ChunkData object) {
+        return ChunkDataP.newBuilder()
+                .setKey(JObjectKeyP.newBuilder().setName(object.key().value()).build())
+                .setData(object.data())
+                .build();
+    }
+}
@@ -1,34 +1,31 @@
 package com.usatiuk.dhfs.files.objects;

-import com.usatiuk.dhfs.objects.JDataRemote;
-import com.usatiuk.dhfs.objects.JObjectKey;
-import com.usatiuk.dhfs.objects.jmap.JMapHolder;
-import com.usatiuk.dhfs.objects.jmap.JMapLongKey;
+import com.usatiuk.dhfs.JDataRemote;
+import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.dhfs.jmap.JMapHolder;
+import com.usatiuk.dhfs.jmap.JMapLongKey;
+import com.usatiuk.dhfs.repository.JDataRemoteDto;

 import java.util.Collection;
 import java.util.Set;

 public record File(JObjectKey key, long mode, long cTime, long mTime,
-                   boolean symlink, long size
+                   boolean symlink
 ) implements JDataRemote, JMapHolder<JMapLongKey> {
     public File withSymlink(boolean symlink) {
-        return new File(key, mode, cTime, mTime, symlink, size);
-    }
-
-    public File withSize(long size) {
-        return new File(key, mode, cTime, mTime, symlink, size);
+        return new File(key, mode, cTime, mTime, symlink);
     }

     public File withMode(long mode) {
-        return new File(key, mode, cTime, mTime, symlink, size);
+        return new File(key, mode, cTime, mTime, symlink);
     }

     public File withCTime(long cTime) {
-        return new File(key, mode, cTime, mTime, symlink, size);
+        return new File(key, mode, cTime, mTime, symlink);
     }

     public File withMTime(long mTime) {
-        return new File(key, mode, cTime, mTime, symlink, size);
+        return new File(key, mode, cTime, mTime, symlink);
     }

     @Override
@@ -42,4 +39,9 @@ public record File(JObjectKey key, long mode, long cTime, long mTime,
         return 64;
         // return chunks.size() * 64;
     }
+
+    @Override
+    public Class<? extends JDataRemoteDto> dtoClass() {
+        return FileDto.class;
+    }
 }
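With the `size` field (and `withSize`) gone from the record, a file's length has to be derived from its chunk map rather than stored redundantly and kept in sync on every write. A sketch of that derivation, assuming a `readChunk`-style lookup for the final chunk's length (hypothetical helper, not part of this diff):

    // size = start offset of the last chunk + that chunk's byte length; 0 for an empty file.
    long deriveSize(File file) {
        var chunks = fileHelper.getChunks(file); // ordered by start offset
        if (chunks.isEmpty()) return 0;
        var last = chunks.get(chunks.size() - 1);
        return last.getLeft() + readChunk(last.getRight()).size(); // readChunk: assumed chunk-data lookup
    }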
@@ -0,0 +1,15 @@
+package com.usatiuk.dhfs.files.objects;
+
+import com.usatiuk.dhfs.JDataRemote;
+import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.dhfs.repository.JDataRemoteDto;
+import org.apache.commons.lang3.tuple.Pair;
+
+import java.util.List;
+
+public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
+    @Override
+    public Class<? extends JDataRemote> objClass() {
+        return File.class;
+    }
+}
@@ -0,0 +1,24 @@
+package com.usatiuk.dhfs.files.objects;
+
+import com.usatiuk.dhfs.jmap.JMapHelper;
+import com.usatiuk.dhfs.repository.syncmap.DtoMapper;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+@ApplicationScoped
+public class FileDtoMapper implements DtoMapper<File, FileDto> {
+    @Inject
+    JMapHelper jMapHelper;
+    @Inject
+    FileHelper fileHelper;
+
+    @Override
+    public FileDto toDto(File obj) {
+        return new FileDto(obj, fileHelper.getChunks(obj));
+    }
+
+    @Override
+    public File fromDto(FileDto dto) {
+        throw new UnsupportedOperationException();
+    }
+}
@@ -0,0 +1,36 @@
+package com.usatiuk.dhfs.files.objects;
+
+import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.dhfs.jmap.JMapHelper;
+import com.usatiuk.dhfs.jmap.JMapLongKey;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+import org.apache.commons.lang3.tuple.Pair;
+
+import java.util.ArrayList;
+import java.util.List;
+
+@ApplicationScoped
+public class FileHelper {
+    @Inject
+    JMapHelper jMapHelper;
+
+    public List<Pair<Long, JObjectKey>> getChunks(File file) {
+        ArrayList<Pair<Long, JObjectKey>> chunks = new ArrayList<>();
+        try (var it = jMapHelper.getIterator(file)) {
+            while (it.hasNext()) {
+                var cur = it.next();
+                chunks.add(Pair.of(cur.getKey().key(), cur.getValue().ref()));
+            }
+        }
+        return List.copyOf(chunks);
+    }
+
+    public void replaceChunks(File file, List<Pair<Long, JObjectKey>> chunks) {
+        jMapHelper.deleteAll(file);
+
+        for (var f : chunks) {
+            jMapHelper.put(file, JMapLongKey.of(f.getLeft()), f.getRight());
+        }
+    }
+}
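FileHelper materializes the per-file chunk map (a JMap of start offset to chunk object key) into an ordered list and writes one back. A usage sketch inside a transaction; names not in this diff (`endOffset`, `newChunk`) are assumptions:

    // Read-modify-write of a file's chunk list, e.g. to append one chunk at the end.
    var chunks = new ArrayList<>(fileHelper.getChunks(file));   // [(0, chunkA), (4096, chunkB), ...]
    chunks.add(Pair.of(endOffset, newChunk.key()));             // endOffset, newChunk: assumed
    fileHelper.replaceChunks(file, chunks);                     // deletes old entries, writes new ones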
@@ -0,0 +1,25 @@
+package com.usatiuk.dhfs.files.objects;
+
+import com.usatiuk.dhfs.ProtoSerializer;
+import com.usatiuk.dhfs.persistence.FileDtoP;
+import com.usatiuk.dhfs.utils.SerializationHelper;
+import jakarta.inject.Singleton;
+
+import java.io.IOException;
+
+@Singleton
+public class FileProtoSerializer implements ProtoSerializer<FileDtoP, FileDto> {
+    @Override
+    public FileDto deserialize(FileDtoP message) {
+        try (var is = message.getSerializedData().newInput()) {
+            return SerializationHelper.deserialize(is);
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public FileDtoP serialize(FileDto object) {
+        return FileDtoP.newBuilder().setSerializedData(SerializationHelper.serialize(object)).build();
+    }
+}
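Unlike ChunkDataProtoSerializer, which maps every field onto a dedicated protobuf message, FileDto travels as an opaque Java-serialized blob inside FileDtoP's bytes field. That keeps the wire schema stable while the DTO evolves, at the cost of requiring both peers to agree on the Java classes. A round-trip sketch (`someFileDto` is an assumed FileDto instance):

    var serializer = new FileProtoSerializer();
    FileDtoP wire = serializer.serialize(someFileDto);   // Java serialization into the embedded bytes
    FileDto back = serializer.deserialize(wire);         // Java deserialization of the same bytes
    assert back.file().key().equals(someFileDto.file().key());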
@@ -0,0 +1,241 @@
+package com.usatiuk.dhfs.files.objects;
+
+import com.usatiuk.dhfs.PeerId;
+import com.usatiuk.dhfs.RemoteObjectDataWrapper;
+import com.usatiuk.dhfs.RemoteObjectMeta;
+import com.usatiuk.dhfs.RemoteTransaction;
+import com.usatiuk.dhfs.files.service.DhfsFileService;
+import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
+import com.usatiuk.dhfs.jmap.JMapHelper;
+import com.usatiuk.dhfs.repository.ObjSyncHandler;
+import com.usatiuk.dhfs.repository.PersistentPeerDataService;
+import com.usatiuk.dhfs.repository.SyncHelper;
+import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.objects.transaction.LockingStrategy;
+import com.usatiuk.objects.transaction.Transaction;
+import com.usatiuk.kleppmanntree.AlreadyExistsException;
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import io.quarkus.logging.Log;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+import org.apache.commons.lang3.tuple.Pair;
+import org.pcollections.HashPMap;
+import org.pcollections.HashTreePMap;
+import org.pcollections.PMap;
+
+import javax.annotation.Nullable;
+import java.util.List;
+import java.util.Objects;
+
+@ApplicationScoped
+public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
+    @Inject
+    Transaction curTx;
+    @Inject
+    PersistentPeerDataService persistentPeerDataService;
+    @Inject
+    JMapHelper jMapHelper;
+    @Inject
+    RemoteTransaction remoteTx;
+    @Inject
+    FileHelper fileHelper;
+
+    @Inject
+    JKleppmannTreeManager jKleppmannTreeManager;
+    @Inject
+    DhfsFileService fileService;
+
+    private JKleppmannTreeManager.JKleppmannTree getTreeW() {
+        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
+    }
+
+    private JKleppmannTreeManager.JKleppmannTree getTreeR() {
+        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
+    }
+
+    private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
+                                 @Nullable FileDto receivedData) {
+        var oursCurMeta = curTx.get(RemoteObjectMeta.class, key).orElse(null);
+
+        if (!oursCurMeta.knownType().isAssignableFrom(File.class))
+            throw new IllegalStateException("Object type mismatch: " + oursCurMeta.knownType() + " vs " + File.class);
+
+        if (!oursCurMeta.knownType().equals(File.class))
+            oursCurMeta = oursCurMeta.withKnownType(File.class);
+
+        curTx.put(oursCurMeta);
+
+        var oursCurFile = remoteTx.getDataLocal(File.class, key).orElse(null);
+        if (oursCurFile == null)
+            throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
+
+        var theirsFile = receivedData.file();
+
+        var oursChunks = fileHelper.getChunks(oursCurFile);
+
+        File first;
+        File second;
+        List<Pair<Long, JObjectKey>> firstChunks;
+        List<Pair<Long, JObjectKey>> secondChunks;
+        PeerId otherHostname;
+
+        if (oursCurFile.mTime() >= theirsFile.mTime()) {
+            first = oursCurFile;
+            firstChunks = oursChunks;
+            second = theirsFile;
+            secondChunks = receivedData.chunks();
+            otherHostname = from;
+        } else {
+            second = oursCurFile;
+            secondChunks = oursChunks;
+            first = theirsFile;
+            firstChunks = receivedData.chunks();
+            otherHostname = persistentPeerDataService.getSelfUuid();
+        }
+
+        Log.tracev("Conflict resolution: ours: {0}, theirs: {1}, chunks: {2}, {3}", oursCurFile, theirsFile, oursChunks, receivedData.chunks());
+        Log.tracev("Conflict resolution: first: {0}, second: {1}, chunks: {2}, {3}", first, second, firstChunks, secondChunks);
+
+        HashPMap<PeerId, Long> newChangelog = HashTreePMap.from(oursCurMeta.changelog());
+
+        for (var entry : receivedChangelog.entrySet()) {
+            newChangelog = newChangelog.plus(entry.getKey(),
+                    Long.max(newChangelog.getOrDefault(entry.getKey(), 0L), entry.getValue())
+            );
+        }
+
+        oursCurMeta = oursCurMeta.withChangelog(newChangelog);
+        curTx.put(oursCurMeta);
+
+        boolean chunksDiff = !Objects.equals(firstChunks, secondChunks);
+
+        boolean wasChanged = first.mTime() != second.mTime()
+                || first.cTime() != second.cTime()
+                || first.mode() != second.mode()
+                || first.symlink() != second.symlink()
+                || chunksDiff;
+
+        if (wasChanged) {
+            oursCurMeta = oursCurMeta.withChangelog(
+                    newChangelog.plus(persistentPeerDataService.getSelfUuid(), newChangelog.getOrDefault(persistentPeerDataService.getSelfUuid(), 0L) + 1)
+            );
+            curTx.put(oursCurMeta);
+
+            remoteTx.putDataRaw(oursCurFile.withCTime(first.cTime()).withMTime(first.mTime()).withMode(first.mode()).withSymlink(first.symlink()));
+            fileHelper.replaceChunks(oursCurFile, firstChunks);
+
+            var newFile = new File(JObjectKey.random(), second.mode(), second.cTime(), second.mTime(), second.symlink());
+            remoteTx.putData(newFile);
+            fileHelper.replaceChunks(newFile, secondChunks);
+
+            var parent = fileService.inoToParent(oursCurFile.key());
+
+            int i = 0;
+
+            do {
+                try {
+                    getTreeW().move(parent.getRight(),
+                            new JKleppmannTreeNodeMetaFile(
+                                    parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + otherHostname.toString() + "." + i,
+                                    newFile.key()
+                            ),
+                            getTreeW().getNewNodeId()
+                    );
+                } catch (AlreadyExistsException aex) {
+                    i++;
+                    continue;
+                }
+                break;
+            } while (true);
+        }
+
+        var curKnownRemoteVersion = oursCurMeta.knownRemoteVersions().get(from);
+        var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();
+
+        if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
+            oursCurMeta = oursCurMeta.withKnownRemoteVersions(oursCurMeta.knownRemoteVersions().plus(from, receivedTotalVer));
+            curTx.put(oursCurMeta);
+        }
+    }
+
+    @Override
+    public void handleRemoteUpdate(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
+                                   @Nullable FileDto receivedData) {
+        var current = curTx.get(RemoteObjectMeta.class, key).orElse(null);
+        if (current == null) {
+            current = new RemoteObjectMeta(key, HashTreePMap.empty());
+            curTx.put(current);
+        }
+
+        var changelogCompare = SyncHelper.compareChangelogs(current.changelog(), receivedChangelog);
+
+        switch (changelogCompare) {
+            case EQUAL -> {
+                Log.debug("No action on update: " + key + " from " + from);
+                if (!current.hasLocalData() && receivedData != null) {
+                    current = current.withHaveLocal(true);
+                    curTx.put(current);
+                    curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
+                            .map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));
+
+                    if (!current.knownType().isAssignableFrom(File.class))
+                        throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);
+
+                    if (!current.knownType().equals(File.class))
+                        current = current.withKnownType(File.class);
+
+                    curTx.put(current);
+
+                    fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
+                }
+            }
+            case NEWER -> {
+                Log.debug("Received newer index update than known: " + key + " from " + from);
+                var newChangelog = receivedChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ?
+                        receivedChangelog : receivedChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L);
+                current = current.withChangelog(newChangelog);
+
+                if (receivedData != null) {
+                    current = current.withHaveLocal(true);
+                    curTx.put(current);
+                    curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
+                            .map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));
+
+                    if (!current.knownType().isAssignableFrom(File.class))
+                        throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);
+
+                    if (!current.knownType().equals(File.class))
+                        current = current.withKnownType(File.class);
+
+                    curTx.put(current);
+
+                    fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
+                } else {
+                    current = current.withHaveLocal(false);
+                    curTx.put(current);
+                }
+            }
+            case OLDER -> {
+                Log.debug("Received older index update than known: " + key + " from " + from);
+                return;
+            }
+            case CONFLICT -> {
+                Log.debug("Conflict on update (inconsistent version): " + key + " from " + from);
+                assert receivedData != null;
+                resolveConflict(from, key, receivedChangelog, receivedData);
+                // TODO:
+                return;
+            }
+        }
+        var curKnownRemoteVersion = current.knownRemoteVersions().get(from);
+        var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();
+
+        if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
+            current = current.withKnownRemoteVersions(current.knownRemoteVersions().plus(from, receivedTotalVer));
+            curTx.put(current);
+        }
+    }
+}
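The sync handler's branches are driven by comparing per-peer changelogs, which behave like version vectors: EQUAL when every counter matches, NEWER/OLDER when one side dominates the other, CONFLICT when each side has a counter the other lacks. A worked example under that reading (values and peer names invented for illustration; `peerA`/`peerB` are assumed PeerId instances):

    // ours:   {A=2, B=1}
    // theirs: {A=2, B=1}  -> EQUAL    (same counters everywhere)
    // theirs: {A=3, B=1}  -> NEWER    (theirs >= ours everywhere, strictly greater somewhere)
    // theirs: {A=1, B=1}  -> OLDER    (ours dominates)
    // theirs: {A=1, B=2}  -> CONFLICT (A is larger on our side, B is larger on theirs)
    var cmp = SyncHelper.compareChangelogs(
            HashTreePMap.<PeerId, Long>empty().plus(peerA, 2L).plus(peerB, 1L),
            HashTreePMap.<PeerId, Long>empty().plus(peerA, 1L).plus(peerB, 2L)); // -> CONFLICT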
@@ -2,8 +2,7 @@ package com.usatiuk.dhfs.files.service;

 import com.google.protobuf.ByteString;
 import com.google.protobuf.UnsafeByteOperations;
-import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.objects.JObjectKey;
 import org.apache.commons.lang3.tuple.Pair;

 import java.util.Optional;
@@ -29,9 +28,7 @@ public interface DhfsFileService {

     Iterable<String> readDir(String name);

-    void updateFileSize(File file);
-
-    Long size(JObjectKey f);
+    long size(JObjectKey fileUuid);

     Optional<ByteString> read(JObjectKey fileUuid, long offset, int length);

@@ -2,21 +2,26 @@ package com.usatiuk.dhfs.files.service;

 import com.google.protobuf.ByteString;
 import com.google.protobuf.UnsafeByteOperations;
+import com.usatiuk.dhfs.JDataRemote;
+import com.usatiuk.dhfs.RemoteObjectMeta;
+import com.usatiuk.dhfs.RemoteTransaction;
 import com.usatiuk.dhfs.files.objects.ChunkData;
 import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.objects.*;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
-import com.usatiuk.dhfs.objects.jmap.JMapEntry;
-import com.usatiuk.dhfs.objects.jmap.JMapHelper;
-import com.usatiuk.dhfs.objects.jmap.JMapLongKey;
-import com.usatiuk.dhfs.objects.persistence.IteratorStart;
-import com.usatiuk.dhfs.objects.transaction.LockingStrategy;
-import com.usatiuk.dhfs.objects.transaction.Transaction;
+import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
+import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
+import com.usatiuk.dhfs.jmap.JMapEntry;
+import com.usatiuk.dhfs.jmap.JMapHelper;
+import com.usatiuk.dhfs.jmap.JMapLongKey;
 import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
+import com.usatiuk.objects.JData;
+import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.objects.iterators.IteratorStart;
+import com.usatiuk.objects.transaction.LockingStrategy;
+import com.usatiuk.objects.transaction.Transaction;
+import com.usatiuk.objects.transaction.TransactionManager;
 import io.grpc.Status;
 import io.grpc.StatusRuntimeException;
 import io.quarkus.logging.Log;
@@ -28,6 +33,7 @@ import jakarta.inject.Inject;
 import org.apache.commons.lang3.tuple.Pair;
 import org.eclipse.microprofile.config.inject.ConfigProperty;

+import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
 import java.util.*;
@@ -42,21 +48,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
     @Inject
     TransactionManager jObjectTxManager;

+    @ConfigProperty(name = "dhfs.files.target_chunk_alignment")
+    int targetChunkAlignment;
+
     @ConfigProperty(name = "dhfs.files.target_chunk_size")
     int targetChunkSize;

-    @ConfigProperty(name = "dhfs.files.write_merge_threshold")
-    float writeMergeThreshold;
-
-    @ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take")
-    float writeMergeMaxChunkToTake;
-
-    @ConfigProperty(name = "dhfs.files.write_merge_limit")
-    float writeMergeLimit;
-
-    @ConfigProperty(name = "dhfs.files.write_last_chunk_limit")
-    float writeLastChunkLimit;
-
     @ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
     boolean useHashForChunks;

@@ -75,8 +72,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
     @Inject
     JMapHelper jMapHelper;

-    private JKleppmannTreeManager.JKleppmannTree getTree() {
-        return jKleppmannTreeManager.getTree(new JObjectKey("fs"));
+    private JKleppmannTreeManager.JKleppmannTree getTreeW() {
+        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
+    }
+
+    private JKleppmannTreeManager.JKleppmannTree getTreeR() {
+        return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
     }

     private ChunkData createChunk(ByteString bytes) {
@@ -87,18 +88,25 @@ public class DhfsFileServiceImpl implements DhfsFileService {

     void init(@Observes @Priority(500) StartupEvent event) {
         Log.info("Initializing file service");
-        getTree();
+        getTreeW();
     }

-    private JKleppmannTreeNode getDirEntry(String name) {
-        var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
+    private JKleppmannTreeNode getDirEntryW(String name) {
+        var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
         if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
         var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
         return ret;
     }

+    private JKleppmannTreeNode getDirEntryR(String name) {
+        var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
+        if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
+        var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
+        return ret;
+    }
+
     private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
-        var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
+        var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
         if (res == null) return Optional.empty();
         var ret = curTx.get(JKleppmannTreeNode.class, res);
         return ret;
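The split of `getDirEntry` into `...W`/`...R` variants mirrors the two tree handles above: the write path takes the tree with its default (exclusive) locking, while read-only lookups opt into `LockingStrategy.OPTIMISTIC` so concurrent readers do not serialize on the tree. Call sites then pick the variant by intent, roughly (paths are illustrative):

    var inode = getDirEntryR("/some/path");     // lookup only: optimistic locking
    var parent = getDirEntryW("/some/parent");  // about to create/move under it: write lock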
@@ -130,7 +138,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
     public Optional<JObjectKey> open(String name) {
         return jObjectTxManager.executeTx(() -> {
             try {
-                var ret = getDirEntry(name);
+                var ret = getDirEntryR(name);
                 return switch (ret.meta()) {
                     case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno());
                     case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
@@ -154,7 +162,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
     public Optional<JObjectKey> create(String name, long mode) {
         return jObjectTxManager.executeTx(() -> {
             Path path = Path.of(name);
-            var parent = getDirEntry(path.getParent().toString());
+            var parent = getDirEntryW(path.getParent().toString());

             ensureDir(parent);

@@ -162,11 +170,11 @@ public class DhfsFileServiceImpl implements DhfsFileService {

             var fuuid = UUID.randomUUID();
             Log.debug("Creating file " + fuuid);
-            File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false, 0);
+            File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false);
             remoteTx.putData(f);

             try {
-                getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
+                getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
             } catch (Exception e) {
                 // fobj.getMeta().removeRef(newNodeId);
                 throw e;
@@ -179,7 +187,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
     @Override
     public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
         return jObjectTxManager.executeTx(() -> {
-            return getTree().findParent(w -> {
+            return getTreeW().findParent(w -> {
                 if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
                     return f.getFileIno().equals(ino);
                 return false;
@@ -191,14 +199,14 @@ public class DhfsFileServiceImpl implements DhfsFileService {
     public void mkdir(String name, long mode) {
         jObjectTxManager.executeTx(() -> {
             Path path = Path.of(name);
-            var parent = getDirEntry(path.getParent().toString());
+            var parent = getDirEntryW(path.getParent().toString());
             ensureDir(parent);

             String dname = path.getFileName().toString();

             Log.debug("Creating directory " + name);

-            getTree().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId());
+            getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
         });
     }

@@ -210,21 +218,21 @@ public class DhfsFileServiceImpl implements DhfsFileService {
                 if (!allowRecursiveDelete && !node.children().isEmpty())
                     throw new DirectoryNotEmptyException();
             }
-            getTree().trash(node.meta(), node.key());
+            getTreeW().trash(node.meta(), node.key());
         });
     }

     @Override
     public Boolean rename(String from, String to) {
         return jObjectTxManager.executeTx(() -> {
-            var node = getDirEntry(from);
+            var node = getDirEntryW(from);
             JKleppmannTreeNodeMeta meta = node.meta();

             var toPath = Path.of(to);
-            var toDentry = getDirEntry(toPath.getParent().toString());
+            var toDentry = getDirEntryW(toPath.getParent().toString());
             ensureDir(toDentry);

-            getTree().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
+            getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
             return true;
         });
     }
@@ -253,7 +261,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
     @Override
     public Iterable<String> readDir(String name) {
         return jObjectTxManager.executeTx(() -> {
-            var found = getDirEntry(name);
+            var found = getDirEntryW(name);

             if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
                 throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
@@ -339,22 +347,8 @@ public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
return readChunk(uuid).size();
|
||||
}
|
||||
|
||||
private void cleanupChunks(File f, Collection<JObjectKey> uuids) {
|
||||
// FIXME:
|
||||
// var inFile = useHashForChunks ? new HashSet<>(f.getChunks().values()) : Collections.emptySet();
|
||||
// for (var cuuid : uuids) {
|
||||
// try {
|
||||
// if (inFile.contains(cuuid)) continue;
|
||||
// jObjectManager.get(cuuid)
|
||||
// .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION,
|
||||
// (m, d, b, v) -> {
|
||||
// m.removeRef(f.getName());
|
||||
// return null;
|
||||
// }));
|
||||
// } catch (Exception e) {
|
||||
// Log.error("Error when cleaning chunk " + cuuid, e);
|
||||
// }
|
||||
// }
|
||||
private long alignDown(long num, long n) {
|
||||
return num & -(1L << n);
|
||||
}
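The new alignDown helper treats n as an exponent: -(1L << n) is a mask with the low n bits clear, so the AND rounds num down to the nearest multiple of 2^n. A small self-contained illustration of the trick (not part of the diff):

// Standalone sketch of the alignDown bit trick above.
// With n = 19 (the default dhfs.files.target_chunk_alignment), offsets snap
// down to 2^19 = 524288-byte (512 KiB) boundaries.
public class AlignDownDemo {
    static long alignDown(long num, long n) {
        return num & -(1L << n); // clear the low n bits
    }

    public static void main(String[] args) {
        System.out.println(alignDown(300, 8));        // 256 (multiple of 2^8)
        System.out.println(alignDown(1_000_000, 19)); // 524288 (512 KiB)
    }
}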

    @Override
@@ -363,7 +357,6 @@ public class DhfsFileServiceImpl implements DhfsFileService {
            if (offset < 0)
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));

            // FIXME:
            var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
            if (file == null) {
                Log.error("File not found when trying to write: " + fileUuid);
@@ -375,149 +368,70 @@ public class DhfsFileServiceImpl implements DhfsFileService {
                        + offset + " " + data.size());
            }

            if (size(fileUuid) < offset) {
                truncate(fileUuid, offset);
                file = remoteTx.getData(File.class, fileUuid).orElse(null);
            }

            Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
            Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
            Log.tracev("Getting last");
            try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(offset + data.size()))) {
                last = it.hasNext() ? it.next() : null;
                Log.tracev("Last: {0}", last);
            }

            NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();

            long start = 0;
            long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
            long writeEnd = offset + data.size();
            long start = realOffset;
            long existingEnd = 0;
            ByteString pendingPrefix = ByteString.empty();
            ByteString pendingSuffix = ByteString.empty();

            try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
                first = it.hasNext() ? it.next() : null;
                Log.tracev("First: {0}", first);
                boolean empty = last == null;
                if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
                    first = null;
                    last = null;
                    start = offset;
                } else if (!empty) {
                    assert first != null;
                    removedChunks.put(first.getKey().key(), first.getValue().ref());
                    while (it.hasNext() && it.peekNextKey().compareTo(last.getKey()) <= 0) {
                        var next = it.next();
                        Log.tracev("Next: {0}", next);
                        removedChunks.put(next.getKey().key(), next.getValue().ref());
            try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
                while (it.hasNext()) {
                    var curEntry = it.next();
                    long curChunkStart = curEntry.getKey().key();
                    var curChunkId = curEntry.getValue().ref();
                    long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
                    existingEnd = curChunkEnd;
                    if (curChunkEnd <= realOffset) break;

                    removedChunks.put(curEntry.getKey().key(), curChunkId);

                    if (curChunkStart < offset) {
                        if (curChunkStart < start)
                            start = curChunkStart;

                        var readChunk = readChunk(curChunkId);
                        pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
                    }
                    removedChunks.put(last.getKey().key(), last.getValue().ref());
                    start = first.getKey().key();

                    if (curChunkEnd > writeEnd) {
                        var readChunk = readChunk(curChunkId);
                        pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
                    }

                    if (curChunkEnd >= writeEnd) break;
                }
            }


            // NavigableMap<Long, JObjectKey> beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap();
            // NavigableMap<Long, JObjectKey> afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap();

            // if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) {
            //     beforeFirst = chunksAll;
            //     afterLast = Collections.emptyNavigableMap();
            //     first = null;
            //     last = null;
            //     start = offset;
            // } else if (!chunksAll.isEmpty()) {
            //     var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true);
            //     removedChunks.putAll(between);
            //     start = first.getKey();
            // }

            ByteString pendingWrites = ByteString.empty();

            if (first != null && first.getKey().key() < offset) {
                var chunkBytes = readChunk(first.getValue().ref());
                pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey().key())));
            }
            pendingWrites = pendingWrites.concat(data);

            if (last != null) {
                var lchunkBytes = readChunk(last.getValue().ref());
                if (last.getKey().key() + lchunkBytes.size() > offset + data.size()) {
                    var startInFile = offset + data.size();
                    var startInChunk = startInFile - last.getKey().key();
                    pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size()));
                }
            }

            int combinedSize = pendingWrites.size();

            if (targetChunkSize > 0) {
            //     if (combinedSize < (targetChunkSize * writeMergeThreshold)) {
            //         boolean leftDone = false;
            //         boolean rightDone = false;
            //         while (!leftDone && !rightDone) {
            //             if (beforeFirst.isEmpty()) leftDone = true;
            //             if (!beforeFirst.isEmpty() || !leftDone) {
            //                 var takeLeft = beforeFirst.lastEntry();
            //
            //                 var cuuid = takeLeft.getValue();
            //
            //                 if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
            //                     leftDone = true;
            //                     continue;
            //                 }
            //
            //                 if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
            //                     leftDone = true;
            //                     continue;
            //                 }
            //
            //                 // FIXME: (and test this)
            //                 beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false);
            //                 start = takeLeft.getKey();
            //                 pendingWrites = readChunk(cuuid).concat(pendingWrites);
            //                 combinedSize += getChunkSize(cuuid);
            //                 removedChunks.put(takeLeft.getKey(), takeLeft.getValue());
            //             }
            //             if (afterLast.isEmpty()) rightDone = true;
            //             if (!afterLast.isEmpty() && !rightDone) {
            //                 var takeRight = afterLast.firstEntry();
            //
            //                 var cuuid = takeRight.getValue();
            //
            //                 if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
            //                     rightDone = true;
            //                     continue;
            //                 }
            //
            //                 if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
            //                     rightDone = true;
            //                     continue;
            //                 }
            //
            //                 // FIXME: (and test this)
            //                 afterLast = afterLast.tailMap(takeRight.getKey(), false);
            //                 pendingWrites = pendingWrites.concat(readChunk(cuuid));
            //                 combinedSize += getChunkSize(cuuid);
            //                 removedChunks.put(takeRight.getKey(), takeRight.getValue());
            //             }
            //         }
            //     }
            }

            NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();

            if (existingEnd < offset) {
                if (!pendingPrefix.isEmpty()) {
                    int diff = Math.toIntExact(offset - existingEnd);
                    pendingPrefix = pendingPrefix.concat(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(diff)));
                } else {
                    fillZeros(existingEnd, offset, newChunks);
                    start = offset;
                }
            }

            ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);

            int combinedSize = pendingWrites.size();

            {
                int targetChunkSize = 1 << targetChunkAlignment;
                int cur = 0;
                while (cur < combinedSize) {
                    int end;

                    if (targetChunkSize <= 0)
                    if (targetChunkAlignment < 0)
                        end = combinedSize;
                    else {
                        if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) {
                            end = Math.min(cur + targetChunkSize, combinedSize);
                        } else {
                            end = combinedSize;
                        }
                    }
                    else
                        end = Math.min(cur + targetChunkSize, combinedSize);

                    var thisChunk = pendingWrites.substring(cur, end);

@@ -540,8 +454,6 @@ public class DhfsFileServiceImpl implements DhfsFileService {
            }

            remoteTx.putData(file);
            cleanupChunks(file, removedChunks.values());
            updateFileSize(file);

            return (long) data.size();
        });
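The rewritten hunk above replaces the old first/last bookkeeping with a single scan: the write start is aligned down (realOffset), every chunk overlapping the write is collected into removedChunks, and bytes hanging off either end of the written range are preserved as pendingPrefix/pendingSuffix. A minimal standalone sketch of just that carry-over step, with plain byte arrays standing in for the project's ByteString chunks (method and parameter names here are illustrative, not from the diff):

// For a chunk starting at curChunkStart that overlaps the write [offset, writeEnd),
// the bytes outside the written range must be carried over into the new chunks.
static void carryOver(long curChunkStart, byte[] chunk, long offset, long writeEnd,
                      java.io.ByteArrayOutputStream pendingPrefix,
                      java.io.ByteArrayOutputStream pendingSuffix) {
    if (curChunkStart < offset) { // head of the chunk precedes the write
        int keep = (int) Math.min(chunk.length, offset - curChunkStart);
        pendingPrefix.write(chunk, 0, keep);
    }
    long curChunkEnd = curChunkStart + chunk.length;
    if (curChunkEnd > writeEnd) { // tail of the chunk extends past the write
        int from = (int) (writeEnd - curChunkStart);
        pendingSuffix.write(chunk, from, chunk.length - from);
    }
}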
@@ -560,18 +472,8 @@ public class DhfsFileServiceImpl implements DhfsFileService {
            }

            if (length == 0) {
                try (var it = jMapHelper.getIterator(file, IteratorStart.GE, JMapLongKey.of(0))) {
                    while (it.hasNext()) {
                        var next = it.next();
                        jMapHelper.delete(file, next.getKey());
                    }
                }
                // var oldChunks = file.chunks();
                //
                // file = file.withChunks(TreePMap.empty()).withMTime(System.currentTimeMillis());
                jMapHelper.deleteAll(file);
                remoteTx.putData(file);
                // cleanupChunks(file, oldChunks.values());
                updateFileSize(file);
                return true;
            }

@@ -582,38 +484,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
            NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();

            if (curSize < length) {
                long combinedSize = (length - curSize);

                long start = curSize;

                // Hack
                HashMap<Long, ByteString> zeroCache = new HashMap<>();

                {
                    long cur = 0;
                    while (cur < combinedSize) {
                        long end;

                        if (targetChunkSize <= 0)
                            end = combinedSize;
                        else {
                            if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
                                end = cur + targetChunkSize;
                            } else {
                                end = combinedSize;
                            }
                        }

                        if (!zeroCache.containsKey(end - cur))
                            zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)]));

                        ChunkData newChunkData = createChunk(zeroCache.get(end - cur));
                        newChunks.put(start, newChunkData.key());

                        start += newChunkData.data().size();
                        cur = end;
                    }
                }
                fillZeros(curSize, length, newChunks);
            } else {
                // Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
                Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
@@ -674,12 +545,45 @@ public class DhfsFileServiceImpl implements DhfsFileService {
            }

            remoteTx.putData(file);
            cleanupChunks(file, removedChunks.values());
            updateFileSize(file);
            return true;
        });
    }

    private void fillZeros(long fillStart, long length, NavigableMap<Long, JObjectKey> newChunks) {
        long combinedSize = (length - fillStart);

        long start = fillStart;

        // Hack
        HashMap<Long, ChunkData> zeroCache = new HashMap<>();

        {
            long cur = 0;
            while (cur < combinedSize) {
                long end;

                if (targetChunkSize <= 0)
                    end = combinedSize;
                else {
                    if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
                        end = cur + targetChunkSize;
                    } else {
                        end = combinedSize;
                    }
                }

                if (!zeroCache.containsKey(end - cur))
                    zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(Math.toIntExact(end - cur)))));

                ChunkData newChunkData = zeroCache.get(end - cur);
                newChunks.put(start, newChunkData.key());

                start += newChunkData.data().size();
                cur = end;
            }
        }
    }
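The extracted fillZeros above cuts a hole into target-sized chunks, folding a short tail (under 1.5x the target) into the last chunk so no tiny fragment is produced, and caches one zero chunk per distinct size. A standalone sketch of just the size computation under that 1.5x rule (targetChunkSize assumed positive; not part of the diff):

static java.util.List<Long> zeroChunkSizes(long hole, long targetChunkSize) {
    var sizes = new java.util.ArrayList<Long>();
    long cur = 0;
    while (cur < hole) {
        // cut a full chunk only if what remains is clearly more than one chunk
        long end = (hole - cur) > (long) (targetChunkSize * 1.5) ? cur + targetChunkSize : hole;
        sizes.add(end - cur);
        cur = end;
    }
    return sizes;
}
// zeroChunkSizes(5_000_000, 2_097_152) -> [2097152, 2902848]: the ~0.77 MiB tail
// is merged into the final chunk instead of becoming a separate fragment.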

    @Override
    public String readlink(JObjectKey uuid) {
        return jObjectTxManager.executeTx(() -> {
@@ -699,7 +603,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
    public JObjectKey symlink(String oldpath, String newpath) {
        return jObjectTxManager.executeTx(() -> {
            Path path = Path.of(newpath);
            var parent = getDirEntry(path.getParent().toString());
            var parent = getDirEntryW(path.getParent().toString());

            ensureDir(parent);

@@ -709,12 +613,11 @@ public class DhfsFileServiceImpl implements DhfsFileService {
            Log.debug("Creating file " + fuuid);

            ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
            File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true, 0);
            File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true);
            jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());

            updateFileSize(f);

            getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
            remoteTx.putData(f);
            getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
            return f.key();
        });
    }
@@ -722,23 +625,33 @@ public class DhfsFileServiceImpl implements DhfsFileService {
    @Override
    public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
        return jObjectTxManager.executeTx(() -> {
            var file = remoteTx.getData(File.class, fileUuid).orElseThrow(
                    () -> new StatusRuntimeException(Status.NOT_FOUND.withDescription(
                            "File not found for setTimes: " + fileUuid))
            );
            var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

            remoteTx.putData(file.withCTime(atimeMs).withMTime(mtimeMs));
            return true;
            // FIXME:
            if (dent instanceof JKleppmannTreeNode) {
                return true;
            } else if (dent instanceof RemoteObjectMeta) {
                var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
                if (remote instanceof File f) {
                    remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
                    return true;
                } else {
                    throw new IllegalArgumentException(fileUuid + " is not a file");
                }
            } else {
                throw new IllegalArgumentException(fileUuid + " is not a file");
            }
        });
    }
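Note that the timestamp updates above go through copy-on-write "with" methods (f.withCTime(...).withMTime(...)) rather than setters, which is what lets the transaction layer keep the previous version as an immutable snapshot. A minimal sketch of the idiom on a plain record (the real File class is assumed to have more fields than this; not part of the diff):

record FileTimes(long cTime, long mTime) {
    FileTimes withCTime(long v) { return new FileTimes(v, mTime); } // new copy, old kept
    FileTimes withMTime(long v) { return new FileTimes(cTime, v); }
}
// Usage: var updated = new FileTimes(0, 0).withCTime(atimeMs).withMTime(mtimeMs);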

    @Override
    public void updateFileSize(File file) {
        jObjectTxManager.executeTx(() -> {
    public long size(JObjectKey fileUuid) {
        return jObjectTxManager.executeTx(() -> {
            long realSize = 0;
            var file = remoteTx.getData(File.class, fileUuid)
                    .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));

            Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
            Log.tracev("Getting last");
            try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) {
                last = it.hasNext() ? it.next() : null;
            }
@@ -747,19 +660,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
                realSize = last.getKey().key() + getChunkSize(last.getValue().ref());
            }

            if (realSize != file.size()) {
                remoteTx.putData(file.withSize(realSize));
            }
        });
    }

    @Override
    public Long size(JObjectKey uuid) {
        return jObjectTxManager.executeTx(() -> {
            var read = remoteTx.getData(File.class, uuid)
                    .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));

            return read.size();
            return realSize;
        });
    }
}
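With this change size() is derived from the chunk map instead of a stored size field: the last entry's offset plus that chunk's length. A standalone sketch with plain byte arrays in place of chunk references (not part of the diff):

static long fileSize(java.util.NavigableMap<Long, byte[]> chunks) {
    var last = chunks.lastEntry();           // highest chunk start offset
    return last == null ? 0 : last.getKey() + last.getValue().length;
}
// fileSize of {0 -> 4 bytes, 4 -> 6 bytes} is 4 + 6 = 10; an empty map is an empty file.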
@@ -0,0 +1,34 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.objects.periodic-push-op-interval=5m
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
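The chunking knobs here line up with the write-path change above: target_chunk_alignment=19 makes the new write path align offsets down to (and derive its chunk size from) 2^19 = 512 KiB, while target_chunk_size=2097152 (2 MiB) is still used by the zero-fill path. A sketch of how such values would be injected into a Quarkus bean via standard MicroProfile Config (class and field names here are illustrative, not from the repository):

import jakarta.enterprise.context.ApplicationScoped;
import org.eclipse.microprofile.config.inject.ConfigProperty;

@ApplicationScoped
class ChunkingConfig {
    @ConfigProperty(name = "dhfs.files.target_chunk_size")
    int targetChunkSize;       // 2097152 = 2 MiB, used when zero-filling holes

    @ConfigProperty(name = "dhfs.files.target_chunk_alignment")
    int targetChunkAlignment;  // 19 -> writes aligned down to 2^19 = 512 KiB
}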
@@ -9,7 +9,8 @@ import java.util.HashMap;
import java.util.Map;

abstract public class TempDataProfile implements QuarkusTestProfile {
    protected void getConfigOverrides(Map<String, String> toPut) {}
    protected void getConfigOverrides(Map<String, String> toPut) {
    }

    @Override
    final public Map<String, String> getConfigOverrides() {
@@ -21,7 +22,6 @@ abstract public class TempDataProfile implements QuarkusTestProfile {
        }
        var ret = new HashMap<String, String>();
        ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
        ret.put("dhfs.objects.root", tempDirWithPrefix.resolve("dhfs_root_d_test").toString());
        ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
        getConfigOverrides(ret);
        return ret;
@@ -3,7 +3,7 @@ package com.usatiuk.dhfs.benchmarks;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.objects.JObjectKey;
import com.usatiuk.objects.JObjectKey;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import jakarta.inject.Inject;
@@ -1,12 +1,12 @@
package com.usatiuk.dhfs.files;

import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.objects.RemoteTransaction;
import com.usatiuk.dhfs.objects.TransactionManager;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.RepeatedTest;
@@ -27,6 +27,7 @@ class Profiles {
        protected void getConfigOverrides(Map<String, String> ret) {
            ret.put("dhfs.fuse.enabled", "false");
            ret.put("dhfs.files.target_chunk_size", "-1");
            ret.put("dhfs.files.target_chunk_alignment", "-1");
        }
    }

@@ -35,11 +36,12 @@ class Profiles {
        protected void getConfigOverrides(Map<String, String> ret) {
            ret.put("dhfs.fuse.enabled", "false");
            ret.put("dhfs.files.target_chunk_size", "3");
            ret.put("dhfs.files.target_chunk_alignment", "2");
        }
    }
}

public class DhfsFileServiceSimpleTestImpl {
public abstract class DhfsFileServiceSimpleTestImpl {
    @Inject
    DhfsFileService fileService;
    @Inject
@@ -150,6 +152,7 @@ public class DhfsFileServiceSimpleTestImpl {
            Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());

            fileService.truncate(uuid, 20);
            Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
            fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17});
            Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
        }
@@ -166,6 +169,7 @@ public class DhfsFileServiceSimpleTestImpl {
            Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());

            fileService.truncate(uuid, 20);
            Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
            fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
            Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray());
        } finally {
@@ -8,4 +8,5 @@ quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true
5 dhfs-parent/dhfs-fuse/.dockerignore Normal file
@@ -0,0 +1,5 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*
43 dhfs-parent/dhfs-fuse/.gitignore vendored Normal file
@@ -0,0 +1,43 @@
#Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml

# Eclipse
.project
.classpath
.settings/
bin/

# IntelliJ
.idea
*.ipr
*.iml
*.iws

# NetBeans
nb-configuration.xml

# Visual Studio Code
.vscode
.factorypath

# OSX
.DS_Store

# Vim
*.swp
*.swo

# patch
*.orig
*.rej

# Local environment
.env

# Plugin directory
/.quarkus/cli/plugins/
2 dhfs-parent/dhfs-fuse/Dockerfile Normal file
@@ -0,0 +1,2 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl
43 dhfs-parent/dhfs-fuse/docker-compose.yml Normal file
@@ -0,0 +1,43 @@
version: "3.2"

services:
  dhfs1:
    build: .
    privileged: true
    devices:
      - /dev/fuse
    volumes:
      - $HOME/dhfs/dhfs1:/dhfs_root
      - $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
      - ./target/quarkus-app:/app
    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
      -Ddhfs.objects.root=/dhfs_root/d
      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
      -jar /app/quarkus-run.jar"
    ports:
      - 8080:8080
      - 8081:8443
      - 5005:5005
  dhfs2:
    build: .
    privileged: true
    devices:
      - /dev/fuse
    volumes:
      - $HOME/dhfs/dhfs2:/dhfs_root
      - $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
      - ./target/quarkus-app:/app
    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
      --add-exports java.base/jdk.internal.access=ALL-UNNAMED
      --add-opens=java.base/java.nio=ALL-UNNAMED
      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
      -Ddhfs.objects.root=/dhfs_root/d
      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
      -jar /app/quarkus-run.jar"
    ports:
      - 8090:8080
      - 8091:8443
      - 5010:5010
@@ -3,8 +3,8 @@
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.usatiuk.dhfs</groupId>
    <artifactId>server</artifactId>
    <version>1.0.0-SNAPSHOT</version>
    <artifactId>dhfs-fuse</artifactId>
    <version>1.0-SNAPSHOT</version>

    <parent>
        <groupId>com.usatiuk.dhfs</groupId>
@@ -23,26 +23,13 @@
            <artifactId>awaitility</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap-deployment</artifactId>
            <version>1.0-SNAPSHOT</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcprov-jdk18on</artifactId>
            <version>1.78.1</version>
        </dependency>
        <dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcpkix-jdk18on</artifactId>
            <version>1.78.1</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
@@ -150,6 +137,16 @@
            <artifactId>objects</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>dhfs-fs</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>sync-base</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>utils</artifactId>
@@ -166,6 +163,11 @@
                    <forkCount>1C</forkCount>
                    <reuseForks>false</reuseForks>
                    <parallel>classes</parallel>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            false
                        </junit.jupiter.execution.parallel.enabled>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
@@ -174,7 +176,7 @@
                <configuration>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            false
                            true
                        </junit.jupiter.execution.parallel.enabled>
                        <junit.jupiter.execution.parallel.mode.default>
                            concurrent
97 dhfs-parent/dhfs-fuse/src/main/docker/Dockerfile.jvm Normal file
@@ -0,0 +1,97 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using :
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This scripts computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
#   in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
#   used to calculate a default maximal heap memory based on a containers restriction.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
#   of the container available memory as set here. The default is `50` which means 50%
#   of the available memory is used as an upper boundary. You can skip this mechanism by
#   setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
#   is used to calculate a default initial heap memory based on the maximum heap memory.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
#   is used as the initial heap size. You can skip this mechanism by setting this value
#   to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
#   This is used to calculate the maximum value of the initial heap memory. If used in
#   a container without any memory constraints for the container then this option has
#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
#   here. The default is 4096MB which means the calculated value of `-Xms` never will
#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
#   when things are happening. This option, if set to true, will set
#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
#   true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
#   (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
#   (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
#   (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
#   previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
#   contain the necessary JRE command-line options to specify the required GC, which
#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
#   accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18

ENV LANGUAGE='en_US:en'


# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/

EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"

ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

93 dhfs-parent/dhfs-fuse/src/main/docker/Dockerfile.legacy-jar Normal file
@@ -0,0 +1,93 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using :
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This scripts computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
#   in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
#   used to calculate a default maximal heap memory based on a containers restriction.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
#   of the container available memory as set here. The default is `50` which means 50%
#   of the available memory is used as an upper boundary. You can skip this mechanism by
#   setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
#   is used to calculate a default initial heap memory based on the maximum heap memory.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
#   is used as the initial heap size. You can skip this mechanism by setting this value
#   to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
#   This is used to calculate the maximum value of the initial heap memory. If used in
#   a container without any memory constraints for the container then this option has
#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
#   here. The default is 4096MB which means the calculated value of `-Xms` never will
#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
#   when things are happening. This option, if set to true, will set
#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
#   true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
#   (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
#   (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
#   (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
#   previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
#   contain the necessary JRE command-line options to specify the required GC, which
#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
#   accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18

ENV LANGUAGE='en_US:en'


COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar

EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"

ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
27 dhfs-parent/dhfs-fuse/src/main/docker/Dockerfile.native Normal file
@@ -0,0 +1,27 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
WORKDIR /work/
RUN chown 1001 /work \
    && chmod "g+rwX" /work \
    && chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application

EXPOSE 8080
USER 1001

ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
@@ -0,0 +1,30 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
RUN chown 1001 /work \
    && chmod "g+rwX" /work \
    && chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application

EXPOSE 8080
USER 1001

ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
@@ -7,6 +7,7 @@ import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException;
import com.usatiuk.dhfs.files.service.GetattrRes;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -30,6 +31,8 @@ import ru.serce.jnrfuse.struct.Timespec;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

import static jnr.posix.FileStat.*;

@@ -50,9 +53,31 @@ public class DhfsFuse extends FuseStubFS {
    @Inject
    DhfsFileService fileService;

    private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
    private final AtomicLong _fh = new AtomicLong(1);

    private long allocateHandle(JObjectKey key) {
        while (true) {
            var newFh = _fh.getAndIncrement();
            if (newFh == 0) continue;
            if (_openHandles.putIfAbsent(newFh, key) == null) {
                return newFh;
            }
        }
    }

    private JObjectKey getFromHandle(long handle) {
        assert handle != 0;
        return _openHandles.get(handle);
    }
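The new handle table above decouples FUSE file handles from paths: open() and create() allocate a handle bound to the resolved object key, read() and write() resolve it back, and release() drops it, so hot I/O paths no longer re-resolve the path on every call. The same pattern in isolation (hypothetical HandleTable name, not part of the diff):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

class HandleTable<K> {
    private final ConcurrentHashMap<Long, K> open = new ConcurrentHashMap<>();
    private final AtomicLong next = new AtomicLong(1);

    long allocate(K key) {
        while (true) {
            long fh = next.getAndIncrement();
            if (fh == 0) continue;                         // 0 is reserved as "no handle"
            if (open.putIfAbsent(fh, key) == null) return fh;
        }
    }

    K get(long fh) { return open.get(fh); }                // lookup on read/write
    void release(long fh) { open.remove(fh); }             // called from release()
}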

    void init(@Observes @Priority(100000) StartupEvent event) {
        if (!enabled) return;
        Paths.get(root).toFile().mkdirs();

        if (!Paths.get(root).toFile().isDirectory())
            throw new IllegalStateException("Could not create directory " + root);

        Log.info("Mounting with root " + root);

        var uid = new UnixSystem().getUid();
@@ -174,7 +199,9 @@ public class DhfsFuse extends FuseStubFS {
    @Override
    public int open(String path, FuseFileInfo fi) {
        try {
            if (fileService.open(path).isEmpty()) return -ErrorCodes.ENOENT();
            var opened = fileService.open(path);
            if (opened.isEmpty()) return -ErrorCodes.ENOENT();
            fi.fh.set(allocateHandle(opened.get()));
            return 0;
        } catch (Throwable e) {
            Log.error("When open " + path, e);
@@ -182,15 +209,20 @@ public class DhfsFuse extends FuseStubFS {
        }
    }

    @Override
    public int release(String path, FuseFileInfo fi) {
        assert fi.fh.get() != 0;
        _openHandles.remove(fi.fh.get());
        return 0;
    }

    @Override
    public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
        if (size < 0) return -ErrorCodes.EINVAL();
        if (offset < 0) return -ErrorCodes.EINVAL();
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            var file = fileOpt.get();
            var read = fileService.read(fileOpt.get(), offset, (int) size);
            var fileKey = getFromHandle(fi.fh.get());
            var read = fileService.read(fileKey, offset, (int) size);
            if (read.isEmpty()) return 0;
            UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
            return read.get().size();
@@ -204,8 +236,7 @@ public class DhfsFuse extends FuseStubFS {
    public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
        if (offset < 0) return -ErrorCodes.EINVAL();
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            var fileKey = getFromHandle(fi.fh.get());
            var buffer = UninitializedByteBuffer.allocateUninitialized((int) size);

            if (buffer.isDirect()) {
@@ -218,7 +249,7 @@ public class DhfsFuse extends FuseStubFS {
                buf.get(0, buffer.array(), 0, (int) size);
            }

            var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer));
            var written = fileService.write(fileKey, offset, UnsafeByteOperations.unsafeWrap(buffer));
            return written.intValue();
        } catch (Throwable e) {
            Log.error("When writing " + path, e);
@@ -231,7 +262,8 @@ public class DhfsFuse extends FuseStubFS {
        try {
            var ret = fileService.create(path, mode);
            if (ret.isEmpty()) return -ErrorCodes.ENOSPC();
            else return 0;
            fi.fh.set(allocateHandle(ret.get()));
            return 0;
        } catch (Throwable e) {
            Log.error("When creating " + path, e);
            return -ErrorCodes.EIO();
@@ -0,0 +1,34 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.objects.periodic-push-op-interval=5m
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
@@ -0,0 +1,29 @@
package com.usatiuk.dhfs;

import io.quarkus.test.junit.QuarkusTestProfile;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

abstract public class TempDataProfile implements QuarkusTestProfile {
    protected void getConfigOverrides(Map<String, String> toPut) {
    }

    @Override
    final public Map<String, String> getConfigOverrides() {
        Path tempDirWithPrefix;
        try {
            tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        var ret = new HashMap<String, String>();
        ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
        ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
        getConfigOverrides(ret);
        return ret;
    }
}
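A sketch of how this profile is meant to be consumed, mirroring the Profiles classes earlier in this diff (class name here is illustrative, not from the repository):

import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import java.util.Map;

class NoFuseProfile extends TempDataProfile {
    @Override
    protected void getConfigOverrides(Map<String, String> toPut) {
        toPut.put("dhfs.fuse.enabled", "false"); // same override the test profiles above use
    }
}

@QuarkusTest
@TestProfile(NoFuseProfile.class)
class SomeDhfsTest { /* temp dirs are created per profile by the base class */ }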
@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.objects;
package com.usatiuk.dhfs;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
@@ -0,0 +1,12 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true
@@ -18,6 +18,11 @@
            <artifactId>junit-jupiter-engine</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
@@ -30,5 +35,9 @@
            <groupId>org.pcollections</groupId>
            <artifactId>pcollections</artifactId>
        </dependency>
        <dependency>
            <groupId>jakarta.annotation</groupId>
            <artifactId>jakarta.annotation-api</artifactId>
        </dependency>
    </dependencies>
</project>