diff --git a/.github/workflows/server.yml b/.github/workflows/server.yml
index b4738b5f..efef968f 100644
--- a/.github/workflows/server.yml
+++ b/.github/workflows/server.yml
@@ -54,12 +54,12 @@ jobs:
       #      - name: Build with Maven
       #        run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG
 
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
        with:
          name: DHFS Server Package
          path: dhfs-parent/server/target/quarkus-app
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
        if: ${{ always() }}
        with:
          name: Test logs
@@ -84,7 +84,7 @@ jobs:
       - name: NPM Build
         run: cd webui && npm run build
 
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
        with:
          name: Webui
          path: webui/dist
@@ -155,7 +155,7 @@ jobs:
          CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" libdhfs_support/builder/cross-build.sh both build "$(pwd)/result"
 
       - name: Upload build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
        with:
          name: NativeLib-${{ matrix.os }}-${{ env.SANITIZED_DOCKER_PLATFORM }}
          path: result
@@ -168,7 +168,7 @@ jobs:
        uses: actions/checkout@v4
 
       - name: Download artifacts
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
        with:
          path: downloaded-libs
@@ -180,7 +180,7 @@ jobs:
          test -f "result/Linux-x86_64/libdhfs_support.so" || exit 1
 
       - name: Upload
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
        with:
          name: NativeLibs
          path: result
@@ -201,19 +201,19 @@ jobs:
        uses: actions/checkout@v4
 
       - name: Download server package
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
        with:
          name: DHFS Server Package
          path: dhfs-package-downloaded
 
       - name: Download webui
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
        with:
          name: Webui
          path: webui-dist-downloaded
 
       - name: Download native libs
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
        with:
          name: NativeLibs
          path: dhfs-native-downloaded
@@ -299,17 +299,17 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v4
 
-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
        with:
          name: DHFS Server Package
          path: dhfs-package-downloaded
-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
        with:
          name: Webui
          path: webui-dist-downloaded
-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
        with:
          name: NativeLibs
          path: dhfs-native-downloaded
@@ -339,7 +339,7 @@ jobs:
        run: tar -cvf ~/run-wrapper.tar.gz ./run-wrapper-out
 
       - name: Upload
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
        with:
          name: Run wrapper
          path: ~/run-wrapper.tar.gz
diff --git a/dhfs-parent/autoprotomap/deployment/pom.xml b/dhfs-parent/autoprotomap/deployment/pom.xml
index 29c02d7a..13f90a9d 100644
--- a/dhfs-parent/autoprotomap/deployment/pom.xml
+++ b/dhfs-parent/autoprotomap/deployment/pom.xml
@@ -34,11 +34,6 @@
         <dependency>
             <groupId>org.apache.commons</groupId>
             <artifactId>commons-collections4</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.projectlombok</groupId>
-            <artifactId>lombok</artifactId>
-            <scope>provided</scope>
-        </dependency>
     </dependencies>
diff --git a/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/AutoprotomapProcessor.java b/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/AutoprotomapProcessor.java
index 3c3b0809..d3e574eb 100644
--- a/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/AutoprotomapProcessor.java
+++ b/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/AutoprotomapProcessor.java
@@ -68,11 +68,11 @@ class AutoprotomapProcessor {
             }
         } catch (Throwable e) {
             StringBuilder sb = new StringBuilder();
-            sb.append(e.toString() + "\n");
+            sb.append(e + "\n");
             for (var el : e.getStackTrace()) {
                 sb.append(el.toString() + "\n");
             }
-            System.out.println(sb.toString());
+            System.out.println(sb);
         }
     }
 }
diff --git a/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/ProtoSerializerGenerator.java b/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/ProtoSerializerGenerator.java
index 386f79f1..6ed94f3a 100644
--- a/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/ProtoSerializerGenerator.java
+++ b/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/ProtoSerializerGenerator.java
@@ -14,6 +14,7 @@ import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Objects;
 import java.util.function.Consumer;
+import java.util.function.Function;
 import java.util.function.IntConsumer;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
@@ -61,7 +62,7 @@ public class ProtoSerializerGenerator {
             visitor.accept(cur);
 
             var next = cur.superClassType().name();
-            if (next.equals(DotName.OBJECT_NAME)) break;
+            if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break;
             cur = index.getClassByName(next);
         }
     }
@@ -82,6 +83,10 @@ public class ProtoSerializerGenerator {
 
         var objectClass = index.getClassByName(objectType.name().toString());
 
+        Function<String, String> getterGetter = objectClass.isRecord()
+                ? Function.identity()
+                : s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX));
+
         for (var f : findAllFields(index, objectClass)) {
             var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX);
 
@@ -89,7 +94,7 @@ public class ProtoSerializerGenerator {
             if ((f.flags() & Opcodes.ACC_PUBLIC) != 0)
                 return bytecodeCreator.readInstanceField(f, object);
             else {
-                var fieldGetter = "get" + capitalize(stripPrefix(f.name(), FIELD_PREFIX));
+                var fieldGetter = getterGetter.apply(f.name());
                 return bytecodeCreator.invokeVirtualMethod(
                         MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object);
             }
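Note on the change above: the new `getterGetter` function switches accessor naming based on whether the mapped class is a Java record. A minimal sketch of the difference it accounts for, using two illustrative types that are not part of the patch:

```java
// Records expose component accessors named after the component itself, while
// the bean convention the generator previously assumed uses get-prefixed getters.
record RecordExample(String key) {}

class BeanExample {
    private final String _key = "k";

    public String getKey() {
        return _key;
    }
}

class AccessorNamingDemo {
    public static void main(String[] args) {
        System.out.println(new RecordExample("r").key()); // record-style: key()
        System.out.println(new BeanExample().getKey());   // bean-style: getKey()
    }
}
```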
diff --git a/dhfs-parent/autoprotomap/integration-tests/pom.xml b/dhfs-parent/autoprotomap/integration-tests/pom.xml
index 88e789ca..1af18935 100644
--- a/dhfs-parent/autoprotomap/integration-tests/pom.xml
+++ b/dhfs-parent/autoprotomap/integration-tests/pom.xml
@@ -22,10 +22,6 @@
             <artifactId>lombok</artifactId>
             <scope>provided</scope>
         </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-resteasy-reactive</artifactId>
-        </dependency>
         <dependency>
             <groupId>com.usatiuk</groupId>
             <artifactId>autoprotomap</artifactId>
@@ -41,11 +37,6 @@
             <artifactId>quarkus-junit5</artifactId>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>io.rest-assured</groupId>
-            <artifactId>rest-assured</artifactId>
-            <scope>test</scope>
-        </dependency>
         <dependency>
             <groupId>io.quarkus</groupId>
             <artifactId>quarkus-grpc</artifactId>
diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/AutoprotomapResource.java b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/AutoprotomapResource.java
deleted file mode 100644
index a56a2f81..00000000
--- a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/AutoprotomapResource.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.usatiuk.autoprotomap.it;
-
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.ws.rs.GET;
-import jakarta.ws.rs.Path;
-
-@Path("/autoprotomap")
-@ApplicationScoped
-public class AutoprotomapResource {
-    // add some rest methods here
-
-    @GET
-    public String hello() {
-        return "Hello autoprotomap";
-    }
-}
diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/InterfaceObject.java b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/InterfaceObject.java
new file mode 100644
index 00000000..7b06b316
--- /dev/null
+++ b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/InterfaceObject.java
@@ -0,0 +1,8 @@
+package com.usatiuk.autoprotomap.it;
+
+import com.usatiuk.autoprotomap.runtime.ProtoMirror;
+
+@ProtoMirror(InterfaceObjectProto.class)
+public interface InterfaceObject {
+    String key();
+}
diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject.java b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject.java
new file mode 100644
index 00000000..b314ca9a
--- /dev/null
+++ b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject.java
@@ -0,0 +1,7 @@
+package com.usatiuk.autoprotomap.it;
+
+import com.usatiuk.autoprotomap.runtime.ProtoMirror;
+
+@ProtoMirror(RecordObjectProto.class)
+public record RecordObject(String key) implements InterfaceObject {
+}
diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject2.java b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject2.java
new file mode 100644
index 00000000..4c66dfc3
--- /dev/null
+++ b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject2.java
@@ -0,0 +1,7 @@
+package com.usatiuk.autoprotomap.it;
+
+import com.usatiuk.autoprotomap.runtime.ProtoMirror;
+
+@ProtoMirror(RecordObject2Proto.class)
+public record RecordObject2(String key, int value) implements InterfaceObject {
+}
diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/proto/autoprotomap_test.proto b/dhfs-parent/autoprotomap/integration-tests/src/main/proto/autoprotomap_test.proto
index f606b3b4..c60bcec7 100644
--- a/dhfs-parent/autoprotomap/integration-tests/src/main/proto/autoprotomap_test.proto
+++ b/dhfs-parent/autoprotomap/integration-tests/src/main/proto/autoprotomap_test.proto
@@ -28,4 +28,20 @@ message AbstractProto {
     SimpleObjectProto simpleObject = 2;
     CustomObjectProto customObject = 3;
   }
+}
+
+message RecordObjectProto {
+  string key = 1;
+}
+
+message RecordObject2Proto {
+  string key = 1;
+  int32 value = 2;
+}
+
+message InterfaceObjectProto {
+  oneof obj {
+    RecordObjectProto recordObject = 1;
+    RecordObject2Proto recordObject2 = 2;
+  }
 }
\ No newline at end of file
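Note on the proto above: the `oneof obj` is what lets a single `InterfaceObjectProto` carry either record type. A hand-written sketch of the dispatch the generated serializer presumably performs on deserialization; `getObjCase()` follows the standard protobuf-java naming for a oneof named `obj`, but the generated method names are an assumption here:

```java
// Hypothetical hand-written equivalent of the generated oneof dispatch.
static InterfaceObject deserializeSketch(InterfaceObjectProto proto) {
    return switch (proto.getObjCase()) {
        case RECORDOBJECT -> new RecordObject(proto.getRecordObject().getKey());
        case RECORDOBJECT2 -> new RecordObject2(
                proto.getRecordObject2().getKey(),
                proto.getRecordObject2().getValue());
        default -> throw new IllegalArgumentException("oneof not set");
    };
}
```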
diff --git a/dhfs-parent/autoprotomap/integration-tests/src/test/java/com/usatiuk/autoprotomap/it/AutoprotomapResourceTest.java b/dhfs-parent/autoprotomap/integration-tests/src/test/java/com/usatiuk/autoprotomap/it/AutoprotomapResourceTest.java
index 2d02ffd3..36f63bf6 100644
--- a/dhfs-parent/autoprotomap/integration-tests/src/test/java/com/usatiuk/autoprotomap/it/AutoprotomapResourceTest.java
+++ b/dhfs-parent/autoprotomap/integration-tests/src/test/java/com/usatiuk/autoprotomap/it/AutoprotomapResourceTest.java
@@ -16,6 +16,8 @@ public class AutoprotomapResourceTest {
     ProtoSerializer<NestedObjectProto, NestedObject> nestedProtoSerializer;
     @Inject
     ProtoSerializer<AbstractProto, AbstractObject> abstractProtoSerializer;
+    @Inject
+    ProtoSerializer<InterfaceObjectProto, InterfaceObject> interfaceProtoSerializer;
 
     @Test
     public void testSimple() {
@@ -74,7 +76,7 @@ public class AutoprotomapResourceTest {
     }
 
     @Test
-    public void tesAbstractNested() {
+    public void testAbstractNested() {
         var ret = abstractProtoSerializer.serialize(
                 new NestedObject(
                         new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
@@ -93,4 +95,19 @@ public class AutoprotomapResourceTest {
         Assertions.assertEquals("nested obj", des.get_nestedName());
         Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
     }
+
+    @Test
+    public void testInterface() {
+        var ret = interfaceProtoSerializer.serialize(new RecordObject("record test"));
+        Assertions.assertEquals("record test", ret.getRecordObject().getKey());
+        var des = (RecordObject) interfaceProtoSerializer.deserialize(ret);
+        Assertions.assertEquals("record test", des.key());
+
+        var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234));
+        Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey());
+        Assertions.assertEquals(1234, ret2.getRecordObject2().getValue());
+        var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2);
+        Assertions.assertEquals("record test 2", des2.key());
+        Assertions.assertEquals(1234, des2.value());
+    }
 }
diff --git a/dhfs-parent/kleppmanntree/pom.xml b/dhfs-parent/kleppmanntree/pom.xml
index e3d133cd..077abfd1 100644
--- a/dhfs-parent/kleppmanntree/pom.xml
+++ b/dhfs-parent/kleppmanntree/pom.xml
@@ -13,19 +13,22 @@
     <artifactId>kleppmanntree</artifactId>
 
     <dependencies>
-        <dependency>
-            <groupId>org.projectlombok</groupId>
-            <artifactId>lombok</artifactId>
-            <scope>provided</scope>
-        </dependency>
         <dependency>
             <groupId>org.junit.jupiter</groupId>
             <artifactId>junit-jupiter-engine</artifactId>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-collections4</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.apache.commons</groupId>
             <artifactId>commons-lang3</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.pcollections</groupId>
+            <artifactId>pcollections</artifactId>
+        </dependency>
     </dependencies>
 </project>
\ No newline at end of file
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/AtomicClock.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/AtomicClock.java
index 32e9b89e..f524473a 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/AtomicClock.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/AtomicClock.java
@@ -18,11 +18,6 @@ public class AtomicClock implements Clock<Long>, Serializable {
         _max = timestamp;
     }
 
-    // FIXME:
-    public void ungetTimestamp() {
-        --_max;
-    }
-
     @Override
     public Long peekTimestamp() {
         return _max;
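Note on `AtomicClock`: it is the tree's Lamport clock, and dropping `ungetTimestamp()` removes the only way to roll it backwards, keeping it monotonic. A compact sketch of the contract, with the merge rule assumed from how `updateTimestamp` is fed remote op timestamps elsewhere in this patch:

```java
// Monotonic Lamport clock sketch: local events tick the clock, received
// timestamps are merged so local time never falls behind anything observed.
class LamportClockSketch {
    private long _max = 1;

    long getTimestamp() {        // tick when creating a local op
        return _max++;
    }

    long peekTimestamp() {       // read without advancing
        return _max;
    }

    void updateTimestamp(long received) { // merge on receiving a remote op
        _max = Math.max(_max, received) + 1;
    }
}
```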
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java
index 42fb5de1..a84091a8 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java
@@ -8,15 +8,16 @@ import java.util.function.Function;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 
-public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT, WrapperT extends TreeNodeWrapper<TimestampT, PeerIdT, MetaT, NodeIdT>> {
+public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
     private static final Logger LOGGER = Logger.getLogger(KleppmannTree.class.getName());
-    private final StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT, WrapperT> _storage;
+
+    private final StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> _storage;
     private final PeerInterface<PeerIdT> _peers;
     private final Clock<TimestampT> _clock;
     private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
-    private HashMap<NodeIdT, WrapperT> _undoCtx = null;
+    private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;
 
-    public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT, WrapperT> storage,
+    public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
                          PeerInterface<PeerIdT> peers,
                          Clock<TimestampT> clock,
                          OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> opRecorder) {
@@ -30,13 +31,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
         if (names.isEmpty()) return fromId;
 
         var from = _storage.getById(fromId);
-        from.rLock();
         NodeIdT childId;
-        try {
-            childId = from.getNode().getChildren().get(names.getFirst());
-        } finally {
-            from.rUnlock();
-        }
+        childId = from.children().get(names.getFirst());
 
         if (childId == null) return null;
@@ -45,69 +41,58 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
     }
 
     public NodeIdT traverse(NodeIdT fromId, List<String> names) {
-        _storage.rLock();
-        try {
-            return traverseImpl(fromId, names.subList(1, names.size()));
-        } finally {
-            _storage.rUnlock();
-        }
+        return traverseImpl(fromId, names.subList(1, names.size()));
     }
 
     public NodeIdT traverse(List<String> names) {
-        _storage.rLock();
-        try {
-            return traverseImpl(_storage.getRootId(), names);
-        } finally {
-            _storage.rUnlock();
-        }
+        return traverseImpl(_storage.getRootId(), names);
     }
 
     private void undoEffect(LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT> effect) {
-        _storage.assertRwLock();
         if (effect.oldInfo() != null) {
             var node = _storage.getById(effect.childId());
-            var oldParent = _storage.getById(effect.oldInfo().oldParent());
             var curParent = _storage.getById(effect.newParentId());
-            curParent.rwLock();
-            oldParent.rwLock();
-            node.rwLock();
-            try {
-                curParent.getNode().getChildren().remove(node.getNode().getMeta().getName());
-                if (!node.getNode().getMeta().getClass().equals(effect.oldInfo().oldMeta().getClass()))
-                    throw new IllegalArgumentException("Class mismatch for meta for node " + node.getNode().getId());
-                node.getNode().setMeta(effect.oldInfo().oldMeta());
-                node.getNode().setParent(oldParent.getNode().getId());
-                oldParent.getNode().getChildren().put(node.getNode().getMeta().getName(), node.getNode().getId());
-                node.notifyRmRef(curParent.getNode().getId());
-                node.notifyRef(oldParent.getNode().getId());
-                node.getNode().setLastEffectiveOp(effect.oldInfo().oldEffectiveMove());
-            } finally {
-                node.rwUnlock();
-                oldParent.rwUnlock();
-                curParent.rwUnlock();
+            {
+                var newCurParentChildren = curParent.children().minus(node.meta().getName());
+                curParent = curParent.withChildren(newCurParentChildren);
+                _storage.putNode(curParent);
             }
+
+            if (!node.meta().getClass().equals(effect.oldInfo().oldMeta().getClass()))
+                throw new IllegalArgumentException("Class mismatch for meta for node " + node.key());
+
+            // Needs to be read after changing curParent, as it might be the same node
+            var oldParent = _storage.getById(effect.oldInfo().oldParent());
+            {
+                var newOldParentChildren = oldParent.children().plus(node.meta().getName(), node.key());
+                oldParent = oldParent.withChildren(newOldParentChildren);
+                _storage.putNode(oldParent);
+            }
+            _storage.putNode(
+                    node.withMeta(effect.oldInfo().oldMeta())
+                            .withParent(effect.oldInfo().oldParent())
+                            .withLastEffectiveOp(effect.oldInfo().oldEffectiveMove())
+            );
         } else {
             var node = _storage.getById(effect.childId());
             var curParent = _storage.getById(effect.newParentId());
-            curParent.rwLock();
-            node.rwLock();
-            try {
-                curParent.getNode().getChildren().remove(node.getNode().getMeta().getName());
-                node.freeze();
-                node.getNode().setParent(null);
-                node.getNode().setLastEffectiveOp(null);
-                node.notifyRmRef(curParent.getNode().getId());
-                _undoCtx.put(node.getNode().getId(), node);
-            } finally {
-                node.rwUnlock();
-                curParent.rwUnlock();
+            {
+                var newCurParentChildren = curParent.children().minus(node.meta().getName());
+                curParent = curParent.withChildren(newCurParentChildren);
+                _storage.putNode(curParent);
             }
+            _storage.putNode(
+                    node.withParent(null)
+                            .withLastEffectiveOp(null)
+            );
+            _undoCtx.put(node.key(), node);
         }
     }
 
     private void undoOp(LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
-        for (var e : op.effects().reversed())
-            undoEffect(e);
+        if (op.effects() != null)
+            for (var e : op.effects().reversed())
+                undoEffect(e);
     }
 
     private void redoOp(Map.Entry<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> entry) {
LOGGER.warning("Wrong op order: received older than known from " + from.toString()); @@ -221,31 +187,18 @@ public class KleppmannTree, PeerIdT ex } public boolean updateExternalTimestamp(PeerIdT from, TimestampT timestamp) { - _storage.rLock(); - try { - // TODO: Ideally no point in this separate locking? - var gotExt = _storage.getPeerTimestampLog().getForPeer(from); - var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId()); - if ((gotExt != null && gotExt.compareTo(timestamp) >= 0) - && (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false; - } finally { - _storage.rUnlock(); - } - _storage.rwLock(); - try { - updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack? - updateTimestampImpl(from, timestamp); - tryTrimLog(); - } finally { - _storage.rwUnlock(); - } - + // TODO: Ideally no point in this separate locking? + var gotExt = _storage.getPeerTimestampLog().getForPeer(from); + var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId()); + if ((gotExt != null && gotExt.compareTo(timestamp) >= 0) + && (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false; + updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack? + updateTimestampImpl(from, timestamp); + tryTrimLog(); return true; } private void applyOp(PeerIdT from, OpMove op, boolean failCreatingIfExists) { - _storage.assertRwLock(); - if (!updateTimestampImpl(from, op.timestamp().timestamp())) return; var log = _storage.getLog(); @@ -276,7 +229,6 @@ public class KleppmannTree, PeerIdT ex if (!_undoCtx.isEmpty()) { for (var e : _undoCtx.entrySet()) { LOGGER.log(Level.FINE, "Dropping node " + e.getKey()); - e.getValue().unfreeze(); _storage.removeNode(e.getKey()); } } @@ -292,12 +244,10 @@ public class KleppmannTree, PeerIdT ex } private CombinedTimestamp getTimestamp() { - _storage.assertRwLock(); return new CombinedTimestamp<>(_clock.getTimestamp(), _peers.getSelfId()); } private OpMove createMove(NodeIdT newParent, LocalMetaT newMeta, NodeIdT node) { - _storage.assertRwLock(); return new OpMove<>(getTimestamp(), newParent, newMeta, node); } @@ -317,91 +267,73 @@ public class KleppmannTree, PeerIdT ex return computed; } - private WrapperT getNewNode(TreeNode desired) { - _storage.assertRwLock(); + private TreeNode getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) { if (_undoCtx != null) { - var node = _undoCtx.get(desired.getId()); + var node = _undoCtx.get(key); if (node != null) { - node.rwLock(); try { - if (!node.getNode().getChildren().isEmpty()) { - LOGGER.log(Level.WARNING, "Not empty children for undone node " + desired.getId()); + if (!node.children().isEmpty()) { + LOGGER.log(Level.WARNING, "Not empty children for undone node " + key); } - node.getNode().setParent(desired.getParent()); - node.notifyRef(desired.getParent()); - node.getNode().setMeta(desired.getMeta()); - node.unfreeze(); + node = node.withParent(parent).withMeta(meta); } catch (Exception e) { - LOGGER.log(Level.SEVERE, "Error while fixing up node " + desired.getId(), e); - node.rwUnlock(); + LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e); node = null; } } if (node != null) { - _undoCtx.remove(desired.getId()); + _undoCtx.remove(key); return node; } } - return _storage.createNewNode(desired); + return _storage.createNewNode(key, parent, meta); } private void applyEffects(OpMove sourceOp, List> effects) { - _storage.assertRwLock(); for (var effect : effects) { - WrapperT 
@@ -221,31 +187,18 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
     }
 
     public boolean updateExternalTimestamp(PeerIdT from, TimestampT timestamp) {
-        _storage.rLock();
-        try {
-            // TODO: Ideally no point in this separate locking?
-            var gotExt = _storage.getPeerTimestampLog().getForPeer(from);
-            var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId());
-            if ((gotExt != null && gotExt.compareTo(timestamp) >= 0)
-                    && (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false;
-        } finally {
-            _storage.rUnlock();
-        }
-        _storage.rwLock();
-        try {
-            updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack?
-            updateTimestampImpl(from, timestamp);
-            tryTrimLog();
-        } finally {
-            _storage.rwUnlock();
-        }
-
+        // TODO: Ideally no point in this separate locking?
+        var gotExt = _storage.getPeerTimestampLog().getForPeer(from);
+        var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId());
+        if ((gotExt != null && gotExt.compareTo(timestamp) >= 0)
+                && (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false;
+        updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack?
+        updateTimestampImpl(from, timestamp);
+        tryTrimLog();
         return true;
     }
 
     private void applyOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
-        _storage.assertRwLock();
-
         if (!updateTimestampImpl(from, op.timestamp().timestamp())) return;
 
         var log = _storage.getLog();
@@ -276,7 +229,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
             if (!_undoCtx.isEmpty()) {
                 for (var e : _undoCtx.entrySet()) {
                     LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
-                    e.getValue().unfreeze();
                     _storage.removeNode(e.getKey());
                 }
             }
@@ -292,12 +244,10 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
     }
 
     private CombinedTimestamp<TimestampT, PeerIdT> getTimestamp() {
-        _storage.assertRwLock();
         return new CombinedTimestamp<>(_clock.getTimestamp(), _peers.getSelfId());
     }
 
     private <LocalMetaT extends MetaT> OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> createMove(NodeIdT newParent, LocalMetaT newMeta, NodeIdT node) {
-        _storage.assertRwLock();
         return new OpMove<>(getTimestamp(), newParent, newMeta, node);
     }
@@ -317,91 +267,73 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
         return computed;
     }
 
-    private WrapperT getNewNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> desired) {
-        _storage.assertRwLock();
+    private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
         if (_undoCtx != null) {
-            var node = _undoCtx.get(desired.getId());
+            var node = _undoCtx.get(key);
             if (node != null) {
-                node.rwLock();
                 try {
-                    if (!node.getNode().getChildren().isEmpty()) {
-                        LOGGER.log(Level.WARNING, "Not empty children for undone node " + desired.getId());
+                    if (!node.children().isEmpty()) {
+                        LOGGER.log(Level.WARNING, "Not empty children for undone node " + key);
                     }
-                    node.getNode().setParent(desired.getParent());
-                    node.notifyRef(desired.getParent());
-                    node.getNode().setMeta(desired.getMeta());
-                    node.unfreeze();
+                    node = node.withParent(parent).withMeta(meta);
                 } catch (Exception e) {
-                    LOGGER.log(Level.SEVERE, "Error while fixing up node " + desired.getId(), e);
-                    node.rwUnlock();
+                    LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
                     node = null;
                 }
             }
             if (node != null) {
-                _undoCtx.remove(desired.getId());
+                _undoCtx.remove(key);
                 return node;
             }
         }
-        return _storage.createNewNode(desired);
+        return _storage.createNewNode(key, parent, meta);
     }
 
     private void applyEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> sourceOp, List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {
-        _storage.assertRwLock();
         for (var effect : effects) {
-            WrapperT oldParentNode = null;
-            WrapperT newParentNode;
-            WrapperT node;
+            TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> oldParentNode = null;
+            TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> newParentNode;
+            TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node;
 
-            newParentNode = _storage.getById(effect.newParentId());
-            newParentNode.rwLock();
-            try {
-                if (effect.oldInfo() != null) {
-                    oldParentNode = _storage.getById(effect.oldInfo().oldParent());
-                    oldParentNode.rwLock();
-                }
-                try {
-                    if (oldParentNode == null) {
-                        node = getNewNode(new TreeNode<>(effect.childId(), effect.newParentId(), effect.newMeta()));
-                    } else {
-                        node = _storage.getById(effect.childId());
-                        node.rwLock();
-                    }
-                    try {
-
-                        if (oldParentNode != null) {
-                            oldParentNode.getNode().getChildren().remove(effect.oldInfo().oldMeta().getName());
-                            node.notifyRmRef(effect.oldInfo().oldParent());
-                        }
-
-                        newParentNode.getNode().getChildren().put(effect.newMeta().getName(), effect.childId());
-                        if (effect.newParentId().equals(_storage.getTrashId()) &&
-                                !Objects.equals(effect.newMeta().getName(), effect.childId()))
-                            throw new IllegalArgumentException("Move to trash should have id of node as name");
-                        node.getNode().setParent(effect.newParentId());
-                        node.getNode().setMeta(effect.newMeta());
-                        node.getNode().setLastEffectiveOp(effect.effectiveOp());
-                        node.notifyRef(effect.newParentId());
-
-                    } finally {
-                        node.rwUnlock();
-                    }
-                } finally {
-                    if (oldParentNode != null)
-                        oldParentNode.rwUnlock();
-                }
-            } finally {
-                newParentNode.rwUnlock();
+            if (effect.oldInfo() != null) {
+                oldParentNode = _storage.getById(effect.oldInfo().oldParent());
             }
+            if (oldParentNode == null) {
+                node = getNewNode(effect.childId(), effect.newParentId(), effect.newMeta());
+            } else {
+                node = _storage.getById(effect.childId());
+            }
+            if (oldParentNode != null) {
+                var newOldParentChildren = oldParentNode.children().minus(effect.oldInfo().oldMeta().getName());
+                oldParentNode = oldParentNode.withChildren(newOldParentChildren);
+                _storage.putNode(oldParentNode);
+            }
+
+            // Needs to be read after changing oldParentNode, as it might be the same node
+            newParentNode = _storage.getById(effect.newParentId());
+
+            {
+                var newNewParentChildren = newParentNode.children().plus(effect.newMeta().getName(), effect.childId());
+                newParentNode = newParentNode.withChildren(newNewParentChildren);
+                _storage.putNode(newParentNode);
+            }
+            if (effect.newParentId().equals(_storage.getTrashId()) &&
+                    !Objects.equals(effect.newMeta().getName(), effect.childId().toString()))
+                throw new IllegalArgumentException("Move to trash should have id of node as name");
+            _storage.putNode(
+                    node.withParent(effect.newParentId())
+                            .withMeta(effect.newMeta())
+                            .withLastEffectiveOp(sourceOp)
+            );
         }
     }
 
     private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computeEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
-        _storage.assertRwLock();
         var node = _storage.getById(op.childId());
-        NodeIdT oldParentId = (node != null && node.getNode().getParent() != null) ? node.getNode().getParent() : null;
+        NodeIdT oldParentId = (node != null && node.parent() != null) ? node.parent() : null;
         NodeIdT newParentId = op.newParentId();
-        WrapperT newParent = _storage.getById(newParentId);
+        TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> newParent = _storage.getById(newParentId);
 
         if (newParent == null) {
             LOGGER.log(Level.SEVERE, "New parent not found " + op.newMeta().getName() + " " + op.childId());
@@ -409,34 +341,29 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
         }
 
         if (oldParentId == null) {
-            newParent.rLock();
-            try {
-                var conflictNodeId = newParent.getNode().getChildren().get(op.newMeta().getName());
+            var conflictNodeId = newParent.children().get(op.newMeta().getName());
 
-                if (conflictNodeId != null) {
-                    if (failCreatingIfExists)
-                        throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId);
+            if (conflictNodeId != null) {
+                if (failCreatingIfExists)
+                    throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId);
 
-                    var conflictNode = _storage.getById(conflictNodeId);
-                    conflictNode.rLock();
-                    try {
-                        MetaT conflictNodeMeta = conflictNode.getNode().getMeta();
-                        String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.getNode().getId();
-                        String newOursName = op.newMeta().getName() + ".conflict." + op.childId();
-                        return new LogRecord<>(op, List.of(
-                                new LogEffect<>(new LogEffectOld<>(conflictNode.getNode().getLastEffectiveOp(), newParentId, conflictNodeMeta), conflictNode.getNode().getLastEffectiveOp(), newParentId, (MetaT) conflictNodeMeta.withName(newConflictNodeName), conflictNodeId),
-                                new LogEffect<>(null, op, op.newParentId(), (MetaT) op.newMeta().withName(newOursName), op.childId())
-                        ));
-                    } finally {
-                        conflictNode.rUnlock();
-                    }
-                } else {
-                    return new LogRecord<>(op, List.of(
-                            new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId())
-                    ));
+                var conflictNode = _storage.getById(conflictNodeId);
+                MetaT conflictNodeMeta = conflictNode.meta();
+
+                if (Objects.equals(conflictNodeMeta, op.newMeta())) {
+                    return new LogRecord<>(op, null);
                 }
-            } finally {
-                newParent.rUnlock();
+
+                String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.key();
+                String newOursName = op.newMeta().getName() + ".conflict." + op.childId();
+                return new LogRecord<>(op, List.of(
+                        new LogEffect<>(new LogEffectOld<>(conflictNode.lastEffectiveOp(), newParentId, conflictNodeMeta), conflictNode.lastEffectiveOp(), newParentId, (MetaT) conflictNodeMeta.withName(newConflictNodeName), conflictNodeId),
+                        new LogEffect<>(null, op, op.newParentId(), (MetaT) op.newMeta().withName(newOursName), op.childId())
+                ));
+            } else {
+                return new LogRecord<>(op, List.of(
+                        new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId())
+                ));
             }
         }
 
@@ -444,96 +371,69 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
             return new LogRecord<>(op, null);
         }
 
-        node.rLock();
-        newParent.rLock();
-        try {
-            MetaT oldMeta = node.getNode().getMeta();
-            if (!oldMeta.getClass().equals(op.newMeta().getClass())) {
-                LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.getNode().getId());
+        MetaT oldMeta = node.meta();
+        if (!oldMeta.getClass().equals(op.newMeta().getClass())) {
+            LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
+            return new LogRecord<>(op, null);
+        }
+        var replaceNodeId = newParent.children().get(op.newMeta().getName());
+        if (replaceNodeId != null) {
+            var replaceNode = _storage.getById(replaceNodeId);
+            var replaceNodeMeta = replaceNode.meta();
+
+            if (Objects.equals(replaceNodeMeta, op.newMeta())) {
                 return new LogRecord<>(op, null);
             }
-            var replaceNodeId = newParent.getNode().getChildren().get(op.newMeta().getName());
-            if (replaceNodeId != null) {
-                var replaceNode = _storage.getById(replaceNodeId);
-                try {
-                    replaceNode.rLock();
-                    var replaceNodeMeta = replaceNode.getNode().getMeta();
-                    return new LogRecord<>(op, List.of(
-                            new LogEffect<>(new LogEffectOld<>(replaceNode.getNode().getLastEffectiveOp(), newParentId, replaceNodeMeta), replaceNode.getNode().getLastEffectiveOp(), _storage.getTrashId(), (MetaT) replaceNodeMeta.withName(replaceNodeId.toString()), replaceNodeId),
-                            new LogEffect<>(new LogEffectOld<>(node.getNode().getLastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
-                    ));
-                } finally {
-                    replaceNode.rUnlock();
-                }
-            }
+
             return new LogRecord<>(op, List.of(
-                    new LogEffect<>(new LogEffectOld<>(node.getNode().getLastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
+                    new LogEffect<>(new LogEffectOld<>(replaceNode.lastEffectiveOp(), newParentId, replaceNodeMeta), replaceNode.lastEffectiveOp(), _storage.getTrashId(), (MetaT) replaceNodeMeta.withName(replaceNodeId.toString()), replaceNodeId),
+                    new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
             ));
-        } finally {
-            newParent.rUnlock();
-            node.rUnlock();
         }
+        return new LogRecord<>(op, List.of(
+                new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
+        ));
     }
 
     private boolean isAncestor(NodeIdT child, NodeIdT parent) {
         var node = _storage.getById(parent);
         NodeIdT curParent;
-        while ((curParent = node.getNode().getParent()) != null) {
+        while ((curParent = node.parent()) != null) {
             if (Objects.equals(child, curParent)) return true;
             node = _storage.getById(curParent);
         }
         return false;
     }
+ " " + op.childId() + "->" + op.newParentId()); + _opRecorder.recordOpForPeer(host, op); } } } diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java index 5cd564b7..0fe9a95f 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java @@ -1,9 +1,11 @@ package com.usatiuk.kleppmanntree; +import java.io.Serializable; + public record LogEffect, PeerIdT extends Comparable, MetaT extends NodeMeta, NodeIdT>( LogEffectOld oldInfo, OpMove effectiveOp, NodeIdT newParentId, MetaT newMeta, - NodeIdT childId) { + NodeIdT childId) implements Serializable { } diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java index ec3f2662..c1c0a477 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java @@ -1,6 +1,9 @@ package com.usatiuk.kleppmanntree; +import java.io.Serializable; + public record LogEffectOld, PeerIdT extends Comparable, MetaT extends NodeMeta, NodeIdT> (OpMove oldEffectiveMove, NodeIdT oldParent, - MetaT oldMeta) {} + MetaT oldMeta) implements Serializable { +} diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java index b9a7b9da..2fb036c4 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java @@ -1,7 +1,9 @@ package com.usatiuk.kleppmanntree; +import java.io.Serializable; import java.util.List; public record LogRecord, PeerIdT extends Comparable, MetaT extends NodeMeta, NodeIdT> (OpMove op, - List> effects) {} + List> effects) implements Serializable { +} diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java index e9c19562..85b7f383 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java @@ -1,5 +1,8 @@ package com.usatiuk.kleppmanntree; +import java.io.Serializable; + public record OpMove, PeerIdT extends Comparable, MetaT extends NodeMeta, NodeIdT> (CombinedTimestamp timestamp, NodeIdT newParentId, MetaT newMeta, - NodeIdT childId) {} + NodeIdT childId) implements Serializable { +} diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java index 69467386..af55b35b 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java @@ -4,32 +4,23 @@ public interface StorageInterface< TimestampT extends Comparable, PeerIdT extends Comparable, MetaT extends NodeMeta, - NodeIdT, - WrapperT extends TreeNodeWrapper> { + NodeIdT> { NodeIdT getRootId(); NodeIdT getTrashId(); NodeIdT getNewNodeId(); - WrapperT getById(NodeIdT id); + TreeNode getById(NodeIdT id); // Creates a 
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java
index 5cd564b7..0fe9a95f 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java
@@ -1,9 +1,11 @@
 package com.usatiuk.kleppmanntree;
 
+import java.io.Serializable;
+
 public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>(
         LogEffectOld<TimestampT, PeerIdT, MetaT, NodeIdT> oldInfo,
         OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> effectiveOp,
         NodeIdT newParentId,
         MetaT newMeta,
-        NodeIdT childId) {
+        NodeIdT childId) implements Serializable {
 }
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java
index ec3f2662..c1c0a477 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java
@@ -1,6 +1,9 @@
 package com.usatiuk.kleppmanntree;
 
+import java.io.Serializable;
+
 public record LogEffectOld<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
         (OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> oldEffectiveMove,
          NodeIdT oldParent,
-         MetaT oldMeta) {}
+         MetaT oldMeta) implements Serializable {
+}
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java
index b9a7b9da..2fb036c4 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java
@@ -1,7 +1,9 @@
 package com.usatiuk.kleppmanntree;
 
+import java.io.Serializable;
 import java.util.List;
 
 public record LogRecord<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
         (OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op,
-         List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {}
+         List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) implements Serializable {
+}
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java
index e9c19562..85b7f383 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java
@@ -1,5 +1,8 @@
 package com.usatiuk.kleppmanntree;
 
+import java.io.Serializable;
+
 public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
         (CombinedTimestamp<TimestampT, PeerIdT> timestamp,
          NodeIdT newParentId,
          MetaT newMeta,
-         NodeIdT childId) {}
+         NodeIdT childId) implements Serializable {
+}
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java
index 69467386..af55b35b 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java
@@ -4,32 +4,23 @@ public interface StorageInterface<
         TimestampT extends Comparable<TimestampT>,
         PeerIdT extends Comparable<PeerIdT>,
         MetaT extends NodeMeta,
-        NodeIdT,
-        WrapperT extends TreeNodeWrapper<TimestampT, PeerIdT, MetaT, NodeIdT>> {
+        NodeIdT> {
     NodeIdT getRootId();
 
     NodeIdT getTrashId();
 
     NodeIdT getNewNodeId();
 
-    WrapperT getById(NodeIdT id);
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getById(NodeIdT id);
 
     // Creates a node, returned wrapper is RW-locked
-    WrapperT createNewNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node);
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> createNewNode(NodeIdT key, NodeIdT parent, MetaT meta);
+
+    void putNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node);
 
     void removeNode(NodeIdT id);
 
     LogInterface<TimestampT, PeerIdT, MetaT, NodeIdT> getLog();
 
    PeerTimestampLogInterface<TimestampT, PeerIdT> getPeerTimestampLog();
-
-    void rLock();
-
-    void rUnlock();
-
-    void rwLock();
-
-    void rwUnlock();
-
-    void assertRwLock();
 }
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java
index a2b1577f..f490bb9e 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java
@@ -1,31 +1,26 @@
 package com.usatiuk.kleppmanntree;
 
-import lombok.Getter;
-import lombok.Setter;
+import org.pcollections.PMap;
 
-import java.util.HashMap;
+import java.io.Serializable;
 import java.util.Map;
 
-@Getter
-@Setter
-public class TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
-    private final NodeIdT _id;
-    private NodeIdT _parent = null;
-    private OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> _lastEffectiveOp = null;
-    private MetaT _meta = null;
-    private Map<String, NodeIdT> _children = new HashMap<>();
+public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> extends Serializable {
+    NodeIdT key();
 
-    public TreeNode(NodeIdT id, NodeIdT parent, MetaT meta) {
-        _id = id;
-        _meta = meta;
-        _parent = parent;
-    }
+    NodeIdT parent();
 
-    public TreeNode(NodeIdT id, NodeIdT parent, MetaT meta, Map<String, NodeIdT> children) {
-        _id = id;
-        _meta = meta;
-        _parent = parent;
-        _children = children;
-    }
+    OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp();
 
+    MetaT meta();
+
+    PMap<String, NodeIdT> children();
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withParent(NodeIdT parent);
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withLastEffectiveOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp);
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withMeta(MetaT meta);
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withChildren(PMap<String, NodeIdT> children);
 }
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNodeWrapper.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNodeWrapper.java
deleted file mode 100644
index 57869231..00000000
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNodeWrapper.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package com.usatiuk.kleppmanntree;
-
-public interface TreeNodeWrapper<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
-    void rLock();
-
-    void rUnlock();
-
-    void rwLock();
-
-    void rwUnlock();
-
-    void freeze();
-
-    void unfreeze();
-
-    void notifyRef(NodeIdT id);
-
-    void notifyRmRef(NodeIdT id);
-
-    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNode();
-}
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java
index e95ce17a..dfe99ebd 100644
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java
@@ -32,8 +32,8 @@ public class KleppmanTreeSimpleTest {
         Assertions.assertEquals(d1id, testNode2._tree.traverse(List.of("Test1")));
         Assertions.assertEquals(d2id, testNode2._tree.traverse(List.of("Test2")));
 
-        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
-        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
+        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
 
         var f1id = testNode1._storageInterface.getNewNodeId();
@@ -54,10 +54,10 @@ public class KleppmanTreeSimpleTest {
         testNode1._tree.move(d1id, new TestNodeMetaDir("Test2"), d2id);
         Assertions.assertEquals(d1id, testNode1._tree.traverse(List.of("Test1")));
         Assertions.assertEquals(d2id, testNode1._tree.traverse(List.of("Test1", "Test2")));
-        Assertions.assertIterableEquals(List.of("Test1"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test1"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
 
         testNode2._tree.move(d2id, new TestNodeMetaDir("Test1"), d1id);
-        Assertions.assertIterableEquals(List.of("Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
         Assertions.assertEquals(d2id, testNode2._tree.traverse(List.of("Test2")));
         Assertions.assertEquals(d1id, testNode2._tree.traverse(List.of("Test2", "Test1")));
 
@@ -72,8 +72,8 @@ public class KleppmanTreeSimpleTest {
         }
 
         // Second node wins as it has smaller timestamp
-        Assertions.assertIterableEquals(List.of("Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
-        Assertions.assertIterableEquals(List.of("Test1", "TestFile"), testNode1._storageInterface.getById(d2id).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
+        Assertions.assertIterableEquals(List.of("Test1", "TestFile"), testNode1._storageInterface.getById(d2id).children().keySet().stream().sorted().toList());
         Assertions.assertEquals(d2id, testNode1._tree.traverse(List.of("Test2")));
         Assertions.assertEquals(d1id, testNode1._tree.traverse(List.of("Test2", "Test1")));
         Assertions.assertEquals(f1id, testNode1._tree.traverse(List.of("Test2", "TestFile")));
@@ -81,8 +81,8 @@ public class KleppmanTreeSimpleTest {
 
         var f11 = testNode1._storageInterface.getById(f1id);
         var f12 = testNode2._storageInterface.getById(f1id);
-        Assertions.assertEquals(f11.getNode().getMeta(), f12.getNode().getMeta());
-        Assertions.assertInstanceOf(TestNodeMetaFile.class, f11.getNode().getMeta());
+        Assertions.assertEquals(f11.meta(), f12.meta());
+        Assertions.assertInstanceOf(TestNodeMetaFile.class, f11.meta());
 
         // Trim test
         Assertions.assertTrue(testNode1._storageInterface.getLog().size() <= 1);
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNode.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNode.java
index 005cf2b0..53f2a7c6 100644
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNode.java
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNode.java
@@ -9,7 +9,7 @@ public class TestNode {
     protected final TestClock _clock;
     protected final TestPeerInterface _peerInterface;
     protected final TestStorageInterface _storageInterface;
-    protected final KleppmannTree<Long, Long, TestNodeMeta, Long, TestNodeWrapper> _tree;
+    protected final KleppmannTree<Long, Long, TestNodeMeta, Long> _tree;
     private final TestOpRecorder _recorder;
 
     public TestNode(long id) {
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java
index 2c2e9f79..be276c9c 100644
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java
@@ -1,12 +1,16 @@
 package com.usatiuk.kleppmanntree;
 
-import lombok.Getter;
-
 public abstract class TestNodeMeta implements NodeMeta {
-    @Getter
     private final String _name;
 
-    public TestNodeMeta(String name) {_name = name;}
+    public TestNodeMeta(String name) {
+        _name = name;
+    }
+
+    @Override
+    public String getName() {
+        return _name;
+    }
 
     abstract public NodeMeta withName(String name);
 }
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java
index 8a5bc91d..9cb0792f 100644
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java
@@ -1,9 +1,6 @@
 package com.usatiuk.kleppmanntree;
 
-import lombok.Getter;
-
 public class TestNodeMetaFile extends TestNodeMeta {
-    @Getter
     private final long _inode;
 
     public TestNodeMetaFile(String name, long inode) {
@@ -11,6 +8,10 @@ public class TestNodeMetaFile extends TestNodeMeta {
         _inode = inode;
     }
 
+    public long getInode() {
+        return _inode;
+    }
+
     @Override
     public NodeMeta withName(String name) {
         return new TestNodeMetaFile(name, _inode);
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeWrapper.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeWrapper.java
deleted file mode 100644
index 57a4f600..00000000
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeWrapper.java
+++ /dev/null
@@ -1,52 +0,0 @@
-package com.usatiuk.kleppmanntree;
-
-public class TestNodeWrapper implements TreeNodeWrapper<Long, Long, TestNodeMeta, Long> {
-    private final TreeNode<Long, Long, TestNodeMeta, Long> _backingNode;
-
-    public TestNodeWrapper(TreeNode<Long, Long, TestNodeMeta, Long> backingNode) {_backingNode = backingNode;}
-
-    @Override
-    public void rLock() {
-
-    }
-
-    @Override
-    public void rUnlock() {
-
-    }
-
-    @Override
-    public void rwLock() {
-
-    }
-
-    @Override
-    public void rwUnlock() {
-
-    }
-
-    @Override
-    public void freeze() {
-
-    }
-
-    @Override
-    public void unfreeze() {
-
-    }
-
-    @Override
-    public void notifyRef(Long id) {
-
-    }
-
-    @Override
-    public void notifyRmRef(Long id) {
-
-    }
-
-    @Override
-    public TreeNode<Long, Long, TestNodeMeta, Long> getNode() {
-        return _backingNode;
-    }
-}
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestPeerInterface.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestPeerInterface.java
index 3f793aab..708bb204 100644
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestPeerInterface.java
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestPeerInterface.java
@@ -6,7 +6,9 @@ import java.util.List;
 public class TestPeerInterface implements PeerInterface<Long> {
     private final long selfId;
 
-    public TestPeerInterface(long selfId) {this.selfId = selfId;}
+    public TestPeerInterface(long selfId) {
+        this.selfId = selfId;
+    }
 
     @Override
     public Long getSelfId() {
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestStorageInterface.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestStorageInterface.java
index 0228d9bf..415f146a 100644
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestStorageInterface.java
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestStorageInterface.java
@@ -3,17 +3,17 @@ package com.usatiuk.kleppmanntree;
 import java.util.HashMap;
 import java.util.Map;
 
-public class TestStorageInterface implements StorageInterface<Long, Long, TestNodeMeta, Long, TestNodeWrapper> {
+public class TestStorageInterface implements StorageInterface<Long, Long, TestNodeMeta, Long> {
     private final long _peerId;
-    private final Map<Long, TreeNode<Long, Long, TestNodeMeta, Long>> _nodes = new HashMap<>();
+    private final Map<Long, TestTreeNode> _nodes = new HashMap<>();
     private final TestLog _log = new TestLog();
     private final TestPeerLog _peerLog = new TestPeerLog();
     private long _curId = 1;
 
     public TestStorageInterface(long peerId) {
         _peerId = peerId;
-        _nodes.put(getRootId(), new TreeNode<>(getRootId(), null, null));
-        _nodes.put(getTrashId(), new TreeNode<>(getTrashId(), null, null));
+        _nodes.put(getRootId(), new TestTreeNode(getRootId(), null, null));
+        _nodes.put(getTrashId(), new TestTreeNode(getTrashId(), null, null));
     }
 
     @Override
@@ -32,18 +32,18 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
     }
 
     @Override
-    public TestNodeWrapper getById(Long id) {
-        var node = _nodes.get(id);
-        return node == null ? null : new TestNodeWrapper(node);
+    public TestTreeNode getById(Long id) {
+        return _nodes.get(id);
     }
 
     @Override
-    public TestNodeWrapper createNewNode(TreeNode<Long, Long, TestNodeMeta, Long> node) {
-        if (!_nodes.containsKey(node.getId())) {
-            _nodes.put(node.getId(), node);
-            return new TestNodeWrapper(node);
-        }
-        throw new IllegalStateException("Node with id " + node.getId() + " already exists");
+    public TestTreeNode createNewNode(Long key, Long parent, TestNodeMeta meta) {
+        return new TestTreeNode(key, parent, meta);
+    }
+
+    @Override
+    public void putNode(TreeNode<Long, Long, TestNodeMeta, Long> node) {
+        _nodes.put(node.key(), (TestTreeNode) node);
     }
 
     @Override
@@ -53,7 +53,6 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
     }
 
-    @Override
     public TestLog getLog() {
         return _log;
     }
@@ -64,29 +63,4 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
     public TestPeerLog getPeerTimestampLog() {
         return _peerLog;
     }
-
-    @Override
-    public void rLock() {
-
-    }
-
-    @Override
-    public void rUnlock() {
-
-    }
-
-    @Override
-    public void rwLock() {
-
-    }
-
-    @Override
-    public void rwUnlock() {
-
-    }
-
-    @Override
-    public void assertRwLock() {
-
-    }
 }
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java
new file mode 100644
index 00000000..373eb580
--- /dev/null
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java
@@ -0,0 +1,33 @@
+package com.usatiuk.kleppmanntree;
+
+import org.pcollections.HashTreePMap;
+import org.pcollections.PMap;
+
+public record TestTreeNode(Long key, Long parent, OpMove<Long, Long, TestNodeMeta, Long> lastEffectiveOp,
+                           TestNodeMeta meta,
+                           PMap<String, Long> children) implements TreeNode<Long, Long, TestNodeMeta, Long> {
+
+    public TestTreeNode(Long id, Long parent, TestNodeMeta meta) {
+        this(id, parent, null, meta, HashTreePMap.empty());
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withParent(Long parent) {
+        return new TestTreeNode(key, parent, lastEffectiveOp, meta, children);
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withLastEffectiveOp(OpMove<Long, Long, TestNodeMeta, Long> lastEffectiveOp) {
+        return new TestTreeNode(key, parent, lastEffectiveOp, meta, children);
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withMeta(TestNodeMeta meta) {
+        return new TestTreeNode(key, parent, lastEffectiveOp, meta, children);
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withChildren(PMap<String, Long> children) {
+        return new TestTreeNode(key, parent, lastEffectiveOp, meta, children);
+    }
+}
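Note for orientation: this is how the test pieces above compose, using only classes visible in these tests (`TestNode` wires the storage, peer interface, clock, and recorder together in its constructor):

```java
// A created directory becomes reachable by name through traverse().
var node = new TestNode(1);
var dirId = node._storageInterface.getNewNodeId();
node._tree.move(node._storageInterface.getRootId(), new TestNodeMetaDir("Docs"), dirId);
assert dirId.equals(node._tree.traverse(List.of("Docs")));
```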
diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml
new file mode 100644
index 00000000..b11658fb
--- /dev/null
+++ b/dhfs-parent/objects/pom.xml
@@ -0,0 +1,112 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>com.usatiuk.dhfs</groupId>
+        <artifactId>parent</artifactId>
+        <version>1.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>objects</artifactId>
+
+    <properties>
+        <maven.compiler.source>21</maven.compiler.source>
+        <maven.compiler.target>21</maven.compiler.target>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-junit5</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-arc</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-grpc</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>net.openhft</groupId>
+            <artifactId>zero-allocation-hashing</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.junit.jupiter</groupId>
+            <artifactId>junit-jupiter-engine</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.jboss.slf4j</groupId>
+            <artifactId>slf4j-jboss-logmanager</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.usatiuk.dhfs</groupId>
+            <artifactId>utils</artifactId>
+            <version>1.0-SNAPSHOT</version>
+        </dependency>
+        <dependency>
+            <groupId>com.usatiuk.dhfs</groupId>
+            <artifactId>supportlib</artifactId>
+            <version>1.0-SNAPSHOT</version>
+        </dependency>
+        <dependency>
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-junit5-mockito</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.lmdbjava</groupId>
+            <artifactId>lmdbjava</artifactId>
+            <version>0.9.1</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-collections4</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.pcollections</groupId>
+            <artifactId>pcollections</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <configuration>
+                    <forkCount>1C</forkCount>
+                    <reuseForks>false</reuseForks>
+                    <parallel>classes</parallel>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>${quarkus.platform.group-id}</groupId>
+                <artifactId>quarkus-maven-plugin</artifactId>
+                <version>${quarkus.platform.version}</version>
+                <extensions>true</extensions>
+                <executions>
+                    <execution>
+                        <id>quarkus-plugin</id>
+                        <goals>
+                            <goal>build</goal>
+                            <goal>generate-code</goal>
+                            <goal>generate-code-tests</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
\ No newline at end of file
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java
new file mode 100644
index 00000000..7014f8a2
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java
@@ -0,0 +1,24 @@
+package com.usatiuk.dhfs.objects;
+
+import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
+import org.apache.commons.lang3.tuple.Pair;
+
+import java.util.Iterator;
+
+public interface CloseableKvIterator<K extends Comparable<K>, V> extends Iterator<Pair<K, V>>, AutoCloseableNoThrow {
+    K peekNextKey();
+
+    void skip();
+
+    K peekPrevKey();
+
+    Pair<K, V> prev();
+
+    boolean hasPrev();
+
+    void skipPrev();
+
+    default CloseableKvIterator<K, V> reversed() {
+        return new ReversedKvIterator<>(this);
+    }
+}
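Note on `CloseableKvIterator`: it is a bidirectional, closeable cursor. A usage sketch of the contract just declared; `makeIterator()` is a hypothetical factory standing in for any concrete implementation (none appear in this excerpt):

```java
// Forward scan with peek, then rewind: peekNextKey() must not advance the
// cursor, while skip()/skipPrev() move it without materializing values.
try (CloseableKvIterator<JObjectKey, JData> it = makeIterator()) {
    while (it.hasNext()) {
        JObjectKey next = it.peekNextKey(); // look ahead
        var entry = it.next();              // Pair<JObjectKey, JData>
        assert next.equals(entry.getKey());
    }
    while (it.hasPrev()) {
        it.skipPrev();                      // walk back to the start
    }
}
```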
+        return transactionManager.current().findAllObjects();
+    }
+
+    @Override
+    public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
+        return transactionManager.current().getIterator(start, key);
+    }
+
+    @Override
+    public void put(JData obj) {
+        transactionManager.current().put(obj);
+    }
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Data.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Data.java
new file mode 100644
index 00000000..b1f7bcb7
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Data.java
@@ -0,0 +1,10 @@
+package com.usatiuk.dhfs.objects;
+
+import java.util.Optional;
+
+public record Data<V>(V value) implements MaybeTombstone<V> {
+    @Override
+    public Optional<V> opt() {
+        return Optional.of(value);
+    }
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/IterProdFn.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/IterProdFn.java
new file mode 100644
index 00000000..01798da9
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/IterProdFn.java
@@ -0,0 +1,8 @@
+package com.usatiuk.dhfs.objects;
+
+import com.usatiuk.dhfs.objects.persistence.IteratorStart;
+
+@FunctionalInterface
+public interface IterProdFn<K extends Comparable<K>, V> {
+    CloseableKvIterator<K, V> get(IteratorStart start, K key);
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java
new file mode 100644
index 00000000..501e3c35
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java
@@ -0,0 +1,16 @@
+package com.usatiuk.dhfs.objects;
+
+import java.io.Serializable;
+
+// TODO: This could maybe be moved to a separate module?
+// The base interface for JObject data.
+// Only one instance of this "exists" per key; the instance in the manager is canonical.
+// When committing a transaction, the instance is checked against the canonical one; if it isn't the same, a race occurred.
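// Illustrative sketch, not part of this change: a minimal JData implementation as
// an immutable record ("ExampleData" is hypothetical). An update builds a new
// instance, which is what makes the commit-time identity check described above
// meaningful: a concurrent writer leaves behind a different canonical instance.
//
//     public record ExampleData(JObjectKey key, String value) implements JData {
//         public ExampleData withValue(String value) {
//             return new ExampleData(key, value); // never mutate in place
//         }
//     }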
+// It is immutable; its version is filled in by the allocator from the AllocVersionProvider.
+public interface JData extends Serializable {
+    JObjectKey key();
+
+    default int estimateSize() {
+        return 100;
+    }
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java
new file mode 100644
index 00000000..d1aaddc2
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java
@@ -0,0 +1,8 @@
+package com.usatiuk.dhfs.objects;
+
+import jakarta.annotation.Nonnull;
+
+import java.io.Serializable;
+
+public record JDataVersionedWrapper(@Nonnull JData data, long version) implements Serializable {
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java
new file mode 100644
index 00000000..b702069b
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java
@@ -0,0 +1,44 @@
+package com.usatiuk.dhfs.objects;
+
+import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
+
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+
+public record JObjectKey(String name) implements Serializable, Comparable<JObjectKey> {
+    public static JObjectKey of(String name) {
+        return new JObjectKey(name);
+    }
+
+    @Override
+    public int compareTo(JObjectKey o) {
+        return name.compareTo(o.name);
+    }
+
+    @Override
+    public String toString() {
+        return name;
+    }
+
+    public byte[] bytes() {
+        return name.getBytes(StandardCharsets.UTF_8);
+    }
+
+    public ByteBuffer toByteBuffer() {
+        var heapBb = StandardCharsets.UTF_8.encode(name);
+        if (heapBb.isDirect()) return heapBb;
+        var directBb = UninitializedByteBuffer.allocateUninitialized(heapBb.remaining());
+        directBb.put(heapBb);
+        directBb.flip();
+        return directBb;
+    }
+
+    public static JObjectKey fromBytes(byte[] bytes) {
+        return new JObjectKey(new String(bytes, StandardCharsets.UTF_8));
+    }
+
+    public static JObjectKey fromByteBuffer(ByteBuffer buff) {
+        return new JObjectKey(StandardCharsets.UTF_8.decode(buff).toString());
+    }
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
new file mode 100644
index 00000000..37e6798d
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
@@ -0,0 +1,239 @@
+package com.usatiuk.dhfs.objects;
+
+import com.usatiuk.dhfs.objects.snapshot.SnapshotManager;
+import com.usatiuk.dhfs.objects.transaction.*;
+import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
+import io.quarkus.logging.Log;
+import io.quarkus.runtime.StartupEvent;
+import jakarta.annotation.Priority;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.enterprise.event.Observes;
+import jakarta.enterprise.inject.Instance;
+import jakarta.inject.Inject;
+
+import java.util.*;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.stream.Stream;
+
+// Manages all access to com.usatiuk.dhfs.objects.JData objects.
+// In particular, it serves as a source of truth for what is committed to the backing storage.
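// Illustrative usage sketch, not part of this change: callers are expected to drive
// this through TransactionManager rather than touching JObjectManager directly.
// "ExampleData" is hypothetical, and a two-argument get() overload is assumed to
// exist alongside the LockingStrategy variant shown in CurrentTransaction.
//
//     @Inject
//     TransactionManager txm;
//     @Inject
//     Transaction curTx;
//
//     void bumpValue(JObjectKey key, String newValue) {
//         txm.run(() -> {
//             var cur = curTx.get(ExampleData.class, key).orElseThrow();
//             curTx.put(cur.withValue(newValue)); // becomes visible atomically on commit
//         });
//         // run() retries on TxCommitException (serialization conflicts), up to 10 times
//     }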
+// All data goes through it, it is responsible for transaction atomicity +// TODO: persistent tx id +@ApplicationScoped +public class JObjectManager { + private final List _preCommitTxHooks; + private boolean _ready = false; + @Inject + SnapshotManager snapshotManager; + @Inject + TransactionFactory transactionFactory; + @Inject + LockManager lockManager; + + private void verifyReady() { + if (!_ready) throw new IllegalStateException("Wrong service order!"); + } + + void init(@Observes @Priority(200) StartupEvent event) { + _ready = true; + } + + JObjectManager(Instance preCommitTxHooks) { + _preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList(); + } + + public TransactionPrivate createTransaction() { + verifyReady(); + var tx = transactionFactory.createTransaction(); + Log.tracev("Created transaction with snapshotId={0}", tx.snapshot().id()); + return tx; + } + + public TransactionHandle commit(TransactionPrivate tx) { + verifyReady(); + var writes = new LinkedHashMap>(); + var dependenciesLocked = new LinkedHashMap>(); + Map> readSet; + var toUnlock = new ArrayList(); + + Consumer addDependency = + key -> { + dependenciesLocked.computeIfAbsent(key, k -> { + var lock = lockManager.lockObject(k); + toUnlock.add(lock); + return snapshotManager.readObjectDirect(k); + }); + }; + + // For existing objects: + // Check that their version is not higher than the version of transaction being committed + // TODO: check deletions, inserts + try { + try { + Function getCurrent = + key -> switch (writes.get(key)) { + case TxRecord.TxObjectRecordWrite write -> write.data(); + case TxRecord.TxObjectRecordDeleted deleted -> null; + case null -> tx.readSource().get(JData.class, key).orElse(null); + default -> { + throw new TxCommitException("Unexpected value: " + writes.get(key)); + } + }; + + boolean somethingChanged; + do { + somethingChanged = false; + Map> currentIteration = new HashMap(); + for (var hook : _preCommitTxHooks) { + for (var n : tx.drainNewWrites()) + currentIteration.put(n.key(), n); + Log.trace("Commit iteration with " + currentIteration.size() + " records for hook " + hook.getClass()); + + for (var entry : currentIteration.entrySet()) { + somethingChanged = true; + Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.getKey()); + var oldObj = getCurrent.apply(entry.getKey()); + switch (entry.getValue()) { + case TxRecord.TxObjectRecordWrite write -> { + if (oldObj == null) { + hook.onCreate(write.key(), write.data()); + } else { + hook.onChange(write.key(), oldObj, write.data()); + } + } + case TxRecord.TxObjectRecordDeleted deleted -> { + hook.onDelete(deleted.key(), oldObj); + } + default -> throw new TxCommitException("Unexpected value: " + entry); + } + } + } + writes.putAll(currentIteration); + } while (somethingChanged); + + if (writes.isEmpty()) { + Log.trace("Committing transaction - no changes"); + return new TransactionHandle() { + @Override + public void onFlush(Runnable runnable) { + runnable.run(); + } + }; + } + + } finally { + readSet = tx.reads(); + + Stream.concat(readSet.keySet().stream(), writes.keySet().stream()) + .sorted(Comparator.comparing(JObjectKey::toString)) + .forEach(addDependency); + + for (var read : readSet.entrySet()) { + if (read.getValue() instanceof TransactionObjectLocked locked) { + toUnlock.add(locked.lock()); + } + } + } + + Log.trace("Committing transaction start"); + var snapshotId = tx.snapshot().id(); + + for (var read : readSet.entrySet()) { + var dep = 
dependenciesLocked.get(read.getKey()); + + if (dep.isEmpty() != read.getValue().data().isEmpty()) { + Log.trace("Checking read dependency " + read.getKey() + " - not found"); + throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty()); + } + + if (dep.isEmpty()) { + // TODO: Every write gets a dependency due to hooks + continue; +// assert false; +// throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty()); + } + + if (dep.get().version() > snapshotId) { + Log.trace("Checking dependency " + read.getKey() + " - newer than"); + throw new TxCommitException("Serialization hazard: " + dep.get().version() + " vs " + snapshotId); + } + + Log.trace("Checking dependency " + read.getKey() + " - ok with read"); + } + + var addFlushCallback = snapshotManager.commitTx( + writes.values().stream() + .filter(r -> { + if (r instanceof TxRecord.TxObjectRecordWrite(JData data)) { + var dep = dependenciesLocked.get(data.key()); + if (dep.isPresent() && dep.get().version() > snapshotId) { + Log.trace("Skipping write " + data.key() + " - dependency " + dep.get().version() + " vs " + snapshotId); + return false; + } + } + return true; + }).toList()); + + for (var callback : tx.getOnCommit()) { + callback.run(); + } + + for (var callback : tx.getOnFlush()) { + addFlushCallback.accept(callback); + } + + return new TransactionHandle() { + @Override + public void onFlush(Runnable runnable) { + addFlushCallback.accept(runnable); + } + }; + } catch (Throwable t) { + Log.trace("Error when committing transaction", t); + throw new TxCommitException(t.getMessage(), t); + } finally { + for (var unlock : toUnlock) { + unlock.close(); + } + tx.close(); + } + } + + public void rollback(TransactionPrivate tx) { + verifyReady(); + tx.reads().forEach((key, value) -> { + if (value instanceof TransactionObjectLocked locked) { + locked.lock().close(); + } + }); + tx.close(); + } + + // private class TransactionObjectSourceImpl implements TransactionObjectSource { +// private final long _txId; +// +// private TransactionObjectSourceImpl(long txId) { +// _txId = txId; +// } +// +// @Override +// public TransactionObject get(Class type, JObjectKey key) { +// var got = getObj(type, key); +// if (got.data().isPresent() && got.data().get().version() > _txId) { +// throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId); +// } +// return got; +// } +// +// @Override +// public TransactionObject getWriteLocked(Class type, JObjectKey key) { +// var got = getObjLock(type, key); +// if (got.data().isPresent() && got.data().get().version() > _txId) { +// got.lock().close(); +// throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId); +// } +// return got; +// } +// } +} \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java new file mode 100644 index 00000000..a42ebc07 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java @@ -0,0 +1,21 @@ +package com.usatiuk.dhfs.objects; + + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.utils.SerializationHelper; +import jakarta.enterprise.context.ApplicationScoped; + +import java.io.Serializable; + +@ApplicationScoped +public class JavaDataSerializer implements 
ObjectSerializer { + @Override + public ByteString serialize(JDataVersionedWrapper obj) { + return SerializationHelper.serialize((Serializable) obj); + } + + @Override + public JDataVersionedWrapper deserialize(ByteString data) { + return SerializationHelper.deserialize(data.toByteArray()); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/KeyPredicateKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/KeyPredicateKvIterator.java new file mode 100644 index 00000000..b43308d2 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/KeyPredicateKvIterator.java @@ -0,0 +1,129 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.NoSuchElementException; +import java.util.function.Function; + +public class KeyPredicateKvIterator, V> extends ReversibleKvIterator { + private final CloseableKvIterator _backing; + private final Function _filter; + private K _next; + + public KeyPredicateKvIterator(CloseableKvIterator backing, IteratorStart start, K startKey, Function filter) { + _goingForward = true; + _backing = backing; + _filter = filter; + fillNext(); + + boolean shouldGoBack = false; + if (start == IteratorStart.LE) { + if (_next == null || _next.compareTo(startKey) > 0) { + shouldGoBack = true; + } + } else if (start == IteratorStart.LT) { + if (_next == null || _next.compareTo(startKey) >= 0) { + shouldGoBack = true; + } + } + + if (shouldGoBack && _backing.hasPrev()) { + _goingForward = false; + _next = null; + fillNext(); + if (_next != null) + _backing.skipPrev(); + _goingForward = true; +// _backing.skip(); + fillNext(); + } + + + switch (start) { + case LT -> { +// assert _next == null || _next.getKey().compareTo(startKey) < 0; + } + case LE -> { +// assert _next == null || _next.getKey().compareTo(startKey) <= 0; + } + case GT -> { + assert _next == null || _next.compareTo(startKey) > 0; + } + case GE -> { + assert _next == null || _next.compareTo(startKey) >= 0; + } + } + } + + private void fillNext() { + while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) { + var next = _goingForward ? _backing.peekNextKey() : _backing.peekPrevKey(); + if (!_filter.apply(next)) { + if (_goingForward) + _backing.skip(); + else + _backing.skipPrev(); + continue; + } + _next = next; + } + } + + @Override + protected void reverse() { + _goingForward = !_goingForward; + _next = null; + + fillNext(); + } + + @Override + protected K peekImpl() { + if (_next == null) + throw new NoSuchElementException(); + return _next; + } + + @Override + protected void skipImpl() { + if (_next == null) + throw new NoSuchElementException(); + _next = null; + if (_goingForward) + _backing.skip(); + else + _backing.skipPrev(); + fillNext(); + } + + @Override + protected boolean hasImpl() { + return _next != null; + } + + @Override + protected Pair nextImpl() { + if (_next == null) + throw new NoSuchElementException("No more elements"); + var retKey = _next; + _next = null; + var got = _goingForward ? 
_backing.next() : _backing.prev(); + assert got.getKey().equals(retKey); + fillNext(); + return got; + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public String toString() { + return "KeyPredicateKvIterator{" + + "_backing=" + _backing + + ", _next=" + _next + + '}'; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockManager.java new file mode 100644 index 00000000..8d7ae3d1 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockManager.java @@ -0,0 +1,14 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; +import com.usatiuk.dhfs.utils.DataLocker; +import jakarta.enterprise.context.ApplicationScoped; + +@ApplicationScoped +public class LockManager { + private final DataLocker _objLocker = new DataLocker(); + + public AutoCloseableNoThrow lockObject(JObjectKey key) { + return _objLocker.lock(key); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java new file mode 100644 index 00000000..eae8f788 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java @@ -0,0 +1,69 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; + +import java.util.function.Function; + +public class MappingKvIterator, V, V_T> implements CloseableKvIterator { + private final CloseableKvIterator _backing; + private final Function _transformer; + + public MappingKvIterator(CloseableKvIterator backing, Function transformer) { + _backing = backing; + _transformer = transformer; + } + + @Override + public K peekNextKey() { + return _backing.peekNextKey(); + } + + @Override + public void skip() { + _backing.skip(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _backing.hasNext(); + } + + @Override + public K peekPrevKey() { + return _backing.peekPrevKey(); + } + + @Override + public Pair prev() { + var got = _backing.prev(); + return Pair.of(got.getKey(), _transformer.apply(got.getValue())); + } + + @Override + public boolean hasPrev() { + return _backing.hasPrev(); + } + + @Override + public void skipPrev() { + _backing.skipPrev(); + } + + @Override + public Pair next() { + var got = _backing.next(); + return Pair.of(got.getKey(), _transformer.apply(got.getValue())); + } + + @Override + public String toString() { + return "MappingKvIterator{" + + "_backing=" + _backing + + '}'; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MaybeTombstone.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MaybeTombstone.java new file mode 100644 index 00000000..f6d47c71 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MaybeTombstone.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.objects; + +import java.util.Optional; + +public interface MaybeTombstone { + Optional opt(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java new file mode 100644 index 00000000..78c8e482 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java @@ -0,0 +1,192 @@ +package com.usatiuk.dhfs.objects; + +import 
com.usatiuk.dhfs.objects.persistence.IteratorStart; +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.*; +import java.util.stream.Collectors; + +public class MergingKvIterator, V> extends ReversibleKvIterator { + private final Map, Integer> _iterators; + private final NavigableMap> _sortedIterators = new TreeMap<>(); + private final String _name; + + public MergingKvIterator(String name, IteratorStart startType, K startKey, List> iterators) { + _goingForward = true; + _name = name; + + IteratorStart initialStartType = startType; + K initialStartKey = startKey; + boolean fail = false; + if (startType == IteratorStart.LT || startType == IteratorStart.LE) { + // Starting at a greatest key less than/less or equal than: + // We have a bunch of iterators that have given us theirs "greatest LT/LE key" + // now we need to pick the greatest of those to start with + // But if some of them don't have a lesser key, we need to pick the smallest of those + var initialIterators = iterators.stream().map(p -> p.get(initialStartType, initialStartKey)).toList(); + try { + IteratorStart finalStartType = startType; + var found = initialIterators.stream() + .filter(CloseableKvIterator::hasNext) + .map((i) -> { + var peeked = i.peekNextKey(); +// Log.warnv("peeked: {0}, from {1}", peeked, i.getClass()); + return peeked; + }).distinct().collect(Collectors.partitioningBy(e -> finalStartType == IteratorStart.LE ? e.compareTo(initialStartKey) <= 0 : e.compareTo(initialStartKey) < 0)); + K initialMaxValue; + if (!found.get(true).isEmpty()) + initialMaxValue = found.get(true).stream().max(Comparator.naturalOrder()).orElse(null); + else + initialMaxValue = found.get(false).stream().min(Comparator.naturalOrder()).orElse(null); + if (initialMaxValue == null) { + fail = true; + } + startKey = initialMaxValue; + startType = IteratorStart.GE; + } finally { + initialIterators.forEach(CloseableKvIterator::close); + } + } + + if (fail) { + _iterators = Map.of(); + return; + } + + int counter = 0; + var iteratorsTmp = new HashMap, Integer>(); + for (var iteratorFn : iterators) { + var iterator = iteratorFn.get(startType, startKey); + iteratorsTmp.put(iterator, counter++); + } + _iterators = Map.copyOf(iteratorsTmp); + + for (CloseableKvIterator iterator : _iterators.keySet()) { + advanceIterator(iterator); + } + + Log.tracev("{0} Created: {1}", _name, _sortedIterators); + switch (initialStartType) { +// case LT -> { +// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0; +// } +// case LE -> { +// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0; +// } + case GT -> { + assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) > 0; + } + case GE -> { + assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) >= 0; + } + } + } + + @SafeVarargs + public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn... 
iterators) { + this(name, startType, startKey, List.of(iterators)); + } + + + private void advanceIterator(CloseableKvIterator iterator) { + if (!iterator.hasNext()) { + return; + } + + K key = iterator.peekNextKey(); + Log.tracev("{0} Advance peeked: {1}-{2}", _name, iterator, key); + if (!_sortedIterators.containsKey(key)) { + _sortedIterators.put(key, iterator); + return; + } + + // Expects that reversed iterator returns itself when reversed again + var oursPrio = _iterators.get(_goingForward ? iterator : iterator.reversed()); + var them = _sortedIterators.get(key); + var theirsPrio = _iterators.get(_goingForward ? them : them.reversed()); + if (oursPrio < theirsPrio) { + _sortedIterators.put(key, iterator); + advanceIterator(them); + } else { + Log.tracev("{0} Skipped: {1}", _name, iterator.peekNextKey()); + iterator.skip(); + advanceIterator(iterator); + } + } + + @Override + protected void reverse() { + var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry(); + Log.tracev("{0} Reversing from {1}", _name, cur); + _goingForward = !_goingForward; + _sortedIterators.clear(); + for (CloseableKvIterator iterator : _iterators.keySet()) { + // _goingForward inverted already + advanceIterator(!_goingForward ? iterator.reversed() : iterator); + } + if (_sortedIterators.isEmpty() || cur == null) { + return; + } + // Advance to the expected key, as we might have brought back some iterators + // that were at their ends + while (!_sortedIterators.isEmpty() + && ((_goingForward && peekImpl().compareTo(cur.getKey()) <= 0) + || (!_goingForward && peekImpl().compareTo(cur.getKey()) >= 0))) { + skipImpl(); + } + Log.tracev("{0} Reversed to {1}", _name, _sortedIterators); + } + + @Override + protected K peekImpl() { + if (_sortedIterators.isEmpty()) + throw new NoSuchElementException(); + return _goingForward ? _sortedIterators.firstKey() : _sortedIterators.lastKey(); + } + + @Override + protected void skipImpl() { + var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry(); + if (cur == null) { + throw new NoSuchElementException(); + } + cur.getValue().skip(); + advanceIterator(cur.getValue()); + Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators); + } + + @Override + protected boolean hasImpl() { + return !_sortedIterators.isEmpty(); + } + + @Override + protected Pair nextImpl() { + var cur = _goingForward ? 
_sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry(); + if (cur == null) { + throw new NoSuchElementException(); + } + var curVal = cur.getValue().next(); + advanceIterator(cur.getValue()); +// Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, cur.getValue(), curVal, _sortedIterators.keySet()); + return curVal; + } + + + @Override + public void close() { + for (CloseableKvIterator iterator : _iterators.keySet()) { + iterator.close(); + } + } + + @Override + public String toString() { + return "MergingKvIterator{" + + "_name='" + _name + '\'' + + ", _sortedIterators=" + _sortedIterators.keySet() + + ", _iterators=" + _iterators + + '}'; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java new file mode 100644 index 00000000..c1f07007 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java @@ -0,0 +1,104 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.*; + +public class NavigableMapKvIterator, V> extends ReversibleKvIterator { + private final NavigableMap _map; + private Iterator> _iterator; + private Map.Entry _next; + + public NavigableMapKvIterator(NavigableMap map, IteratorStart start, K key) { + _map = map; + SortedMap _view; + _goingForward = true; + switch (start) { + case GE -> _view = map.tailMap(key, true); + case GT -> _view = map.tailMap(key, false); + case LE -> { + var floorKey = map.floorKey(key); + if (floorKey == null) _view = _map; + else _view = map.tailMap(floorKey, true); + } + case LT -> { + var lowerKey = map.lowerKey(key); + if (lowerKey == null) _view = _map; + else _view = map.tailMap(lowerKey, true); + } + default -> throw new IllegalArgumentException("Unknown start type"); + } + _iterator = _view.entrySet().iterator(); + fillNext(); + } + + @Override + protected void reverse() { + var oldNext = _next; + _next = null; + if (_goingForward) { + _iterator + = oldNext == null + ? _map.descendingMap().entrySet().iterator() + : _map.headMap(oldNext.getKey(), false).descendingMap().entrySet().iterator(); + } else { + _iterator + = oldNext == null + ? 
_map.entrySet().iterator() + : _map.tailMap(oldNext.getKey(), false).entrySet().iterator(); + } + _goingForward = !_goingForward; + fillNext(); + } + + private void fillNext() { + while (_iterator.hasNext() && _next == null) { + _next = _iterator.next(); + } + } + + @Override + protected K peekImpl() { + if (_next == null) { + throw new NoSuchElementException(); + } + return _next.getKey(); + } + + @Override + protected void skipImpl() { + if (_next == null) { + throw new NoSuchElementException(); + } + _next = null; + fillNext(); + } + + @Override + protected boolean hasImpl() { + return _next != null; + } + + @Override + protected Pair nextImpl() { + if (_next == null) { + throw new NoSuchElementException("No more elements"); + } + var ret = _next; + _next = null; + fillNext(); + return Pair.of(ret); + } + + @Override + public void close() { + } + + @Override + public String toString() { + return "NavigableMapKvIterator{" + + ", _next=" + _next + + '}'; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java new file mode 100644 index 00000000..078dd90f --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.objects; + +import com.google.protobuf.ByteString; + +public interface ObjectSerializer { + ByteString serialize(T obj); + + T deserialize(ByteString data); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingDelete.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingDelete.java new file mode 100644 index 00000000..8ecc85b5 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingDelete.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects; + +public record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWrite.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWrite.java new file mode 100644 index 00000000..065224f6 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWrite.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects; + +public record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWriteEntry.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWriteEntry.java new file mode 100644 index 00000000..1476e167 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWriteEntry.java @@ -0,0 +1,5 @@ +package com.usatiuk.dhfs.objects; + +public interface PendingWriteEntry { + long bundleId(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java new file mode 100644 index 00000000..3b1b50e4 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java @@ -0,0 +1,16 @@ +package com.usatiuk.dhfs.objects; + +public interface PreCommitTxHook { + default void onChange(JObjectKey key, JData old, JData cur) { + } + + default void onCreate(JObjectKey key, JData cur) { + } + + default void onDelete(JObjectKey key, JData cur) { + } + + default int getPriority() { + return 0; + } +} diff --git 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java new file mode 100644 index 00000000..cfe85ffa --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java @@ -0,0 +1,129 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.NoSuchElementException; +import java.util.function.Function; + +public class PredicateKvIterator, V, V_T> extends ReversibleKvIterator { + private final CloseableKvIterator _backing; + private final Function _transformer; + private Pair _next; + + public PredicateKvIterator(CloseableKvIterator backing, IteratorStart start, K startKey, Function transformer) { + _goingForward = true; + _backing = backing; + _transformer = transformer; + fillNext(); + + boolean shouldGoBack = false; + if (start == IteratorStart.LE) { + if (_next == null || _next.getKey().compareTo(startKey) > 0) { + shouldGoBack = true; + } + } else if (start == IteratorStart.LT) { + if (_next == null || _next.getKey().compareTo(startKey) >= 0) { + shouldGoBack = true; + } + } + + if (shouldGoBack && _backing.hasPrev()) { + _goingForward = false; + _next = null; + _backing.skipPrev(); + fillNext(); + _goingForward = true; + _backing.skip(); + fillNext(); + } + + + switch (start) { + case LT -> { +// assert _next == null || _next.getKey().compareTo(startKey) < 0; + } + case LE -> { +// assert _next == null || _next.getKey().compareTo(startKey) <= 0; + } + case GT -> { + assert _next == null || _next.getKey().compareTo(startKey) > 0; + } + case GE -> { + assert _next == null || _next.getKey().compareTo(startKey) >= 0; + } + } + } + + private void fillNext() { + while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) { + var next = _goingForward ? 
_backing.next() : _backing.prev(); + var transformed = _transformer.apply(next.getValue()); + if (transformed == null) + continue; + _next = Pair.of(next.getKey(), transformed); + } + } + + @Override + protected void reverse() { + _goingForward = !_goingForward; + boolean wasAtEnd = _next == null; + + if (_goingForward && !wasAtEnd) + _backing.skip(); + else if (!_goingForward && !wasAtEnd) + _backing.skipPrev(); + + if (!wasAtEnd) + Log.tracev("Skipped in reverse: {0}", _next); + + _next = null; + + fillNext(); + } + + @Override + protected K peekImpl() { + if (_next == null) + throw new NoSuchElementException(); + return _next.getKey(); + } + + @Override + protected void skipImpl() { + if (_next == null) + throw new NoSuchElementException(); + _next = null; + fillNext(); + } + + @Override + protected boolean hasImpl() { + return _next != null; + } + + @Override + protected Pair nextImpl() { + if (_next == null) + throw new NoSuchElementException("No more elements"); + var ret = _next; + _next = null; + fillNext(); + return ret; + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public String toString() { + return "PredicateKvIterator{" + + "_backing=" + _backing + + ", _next=" + _next + + '}'; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversedKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversedKvIterator.java new file mode 100644 index 00000000..88b23f30 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversedKvIterator.java @@ -0,0 +1,61 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; + +public class ReversedKvIterator, V> implements CloseableKvIterator { + private final CloseableKvIterator _backing; + + public ReversedKvIterator(CloseableKvIterator backing) { + _backing = backing; + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _backing.hasPrev(); + } + + @Override + public Pair next() { + return _backing.prev(); + } + + @Override + public K peekNextKey() { + return _backing.peekPrevKey(); + } + + @Override + public void skip() { + _backing.skipPrev(); + } + + @Override + public K peekPrevKey() { + return _backing.peekNextKey(); + } + + @Override + public Pair prev() { + return _backing.next(); + } + + @Override + public boolean hasPrev() { + return _backing.hasNext(); + } + + @Override + public void skipPrev() { + _backing.skip(); + } + + @Override + public CloseableKvIterator reversed() { + return _backing; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversibleKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversibleKvIterator.java new file mode 100644 index 00000000..a13a063d --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversibleKvIterator.java @@ -0,0 +1,79 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; + +public abstract class ReversibleKvIterator, V> implements CloseableKvIterator { + protected boolean _goingForward; + + protected abstract void reverse(); + + private void ensureForward() { + if (!_goingForward) { + reverse(); + } + } + + private void ensureBackward() { + if (_goingForward) { + reverse(); + } + } + + abstract protected K peekImpl(); + + abstract protected void skipImpl(); + + abstract protected boolean hasImpl(); + + abstract protected Pair nextImpl(); + + @Override + public K peekNextKey() { + 
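// Note: the iterator is directional; ensureForward()/ensureBackward() lazily flip
// the underlying cursor via reverse() only when the caller switches between next-
// and prev-style operations, so runs of same-direction calls pay no extra cost.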
ensureForward(); + return peekImpl(); + } + + @Override + public void skip() { + ensureForward(); + skipImpl(); + } + + + @Override + public boolean hasNext() { + ensureForward(); + return hasImpl(); + } + + @Override + public Pair next() { + ensureForward(); + return nextImpl(); + } + + @Override + public K peekPrevKey() { + ensureBackward(); + return peekImpl(); + } + + @Override + public Pair prev() { + ensureBackward(); + return nextImpl(); + } + + @Override + public boolean hasPrev() { + ensureBackward(); + return hasImpl(); + } + + @Override + public void skipPrev() { + ensureBackward(); + skipImpl(); + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Tombstone.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Tombstone.java new file mode 100644 index 00000000..62a7ca1c --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Tombstone.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects; + +import java.util.Optional; + +public record Tombstone() implements MaybeTombstone { + @Override + public Optional opt() { + return Optional.empty(); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java new file mode 100644 index 00000000..e8e01e27 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java @@ -0,0 +1,84 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.List; + +public class TombstoneMergingKvIterator, V> implements CloseableKvIterator { + private final CloseableKvIterator _backing; + private final String _name; + + public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List>> iterators) { + _name = name; + _backing = new PredicateKvIterator<>( + new MergingKvIterator<>(name + "-merging", startType, startKey, iterators), + startType, startKey, + pair -> { + Log.tracev("{0} - Processing pair {1}", _name, pair); + if (pair instanceof Tombstone) { + return null; + } + return ((Data) pair).value(); + }); + } + + @SafeVarargs + public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn>... 
iterators) { + this(name, startType, startKey, List.of(iterators)); + } + + @Override + public K peekNextKey() { + return _backing.peekNextKey(); + } + + @Override + public void skip() { + _backing.skip(); + } + + @Override + public K peekPrevKey() { + return _backing.peekPrevKey(); + } + + @Override + public Pair prev() { + return _backing.prev(); + } + + @Override + public boolean hasPrev() { + return _backing.hasPrev(); + } + + @Override + public void skipPrev() { + _backing.skipPrev(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _backing.hasNext(); + } + + @Override + public Pair next() { + return _backing.next(); + } + + @Override + public String toString() { + return "TombstoneMergingKvIterator{" + + "_backing=" + _backing + + ", _name='" + _name + '\'' + + '}'; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java new file mode 100644 index 00000000..2fe54390 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java @@ -0,0 +1,102 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.dhfs.objects.transaction.TransactionHandle; +import com.usatiuk.dhfs.utils.VoidFn; +import io.quarkus.logging.Log; + +import java.util.function.Supplier; + +public interface TransactionManager { + void begin(); + + TransactionHandle commit(); + + void rollback(); + + default T runTries(Supplier supplier, int tries) { + if (current() != null) { + return supplier.get(); + } + + begin(); + T ret; + try { + ret = supplier.get(); + } catch (TxCommitException txCommitException) { + rollback(); + if (tries == 0) { + Log.error("Transaction commit failed", txCommitException); + throw txCommitException; + } + return runTries(supplier, tries - 1); + } catch (Throwable e) { + rollback(); + throw e; + } + try { + commit(); + return ret; + } catch (TxCommitException txCommitException) { + if (tries == 0) { + Log.error("Transaction commit failed", txCommitException); + throw txCommitException; + } + return runTries(supplier, tries - 1); + } + } + + default TransactionHandle runTries(VoidFn fn, int tries) { + if (current() != null) { + fn.apply(); + return new TransactionHandle() { + @Override + public void onFlush(Runnable runnable) { + current().onCommit(runnable); + } + }; + } + + begin(); + try { + fn.apply(); + } catch (TxCommitException txCommitException) { + rollback(); + if (tries == 0) { + Log.error("Transaction commit failed", txCommitException); + throw txCommitException; + } + return runTries(fn, tries - 1); + } catch (Throwable e) { + rollback(); + throw e; + } + try { + return commit(); + } catch (TxCommitException txCommitException) { + if (tries == 0) { + Log.error("Transaction commit failed", txCommitException); + throw txCommitException; + } + return runTries(fn, tries - 1); + } + } + + default TransactionHandle run(VoidFn fn) { + return runTries(fn, 10); + } + + default T run(Supplier supplier) { + return runTries(supplier, 10); + } + + default void executeTx(VoidFn fn) { + run(fn); + } + + default T executeTx(Supplier supplier) { + return run(supplier); + } + + Transaction current(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java new file mode 
100644 index 00000000..bf617e19 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -0,0 +1,67 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.dhfs.objects.transaction.TransactionHandle; +import com.usatiuk.dhfs.objects.transaction.TransactionPrivate; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +@ApplicationScoped +public class TransactionManagerImpl implements TransactionManager { + private static final ThreadLocal _currentTransaction = new ThreadLocal<>(); + @Inject + JObjectManager jObjectManager; + + @Override + public void begin() { + if (_currentTransaction.get() != null) { + throw new IllegalStateException("Transaction already started"); + } + + Log.trace("Starting transaction"); + var tx = jObjectManager.createTransaction(); + _currentTransaction.set(tx); + } + + @Override + public TransactionHandle commit() { + if (_currentTransaction.get() == null) { + throw new IllegalStateException("No transaction started"); + } + + Log.trace("Committing transaction"); + try { + return jObjectManager.commit(_currentTransaction.get()); + } catch (Throwable e) { + Log.trace("Transaction commit failed", e); + throw e; + } finally { + _currentTransaction.get().close(); + _currentTransaction.remove(); + } + } + + @Override + public void rollback() { + if (_currentTransaction.get() == null) { + throw new IllegalStateException("No transaction started"); + } + + try { + jObjectManager.rollback(_currentTransaction.get()); + } catch (Throwable e) { + Log.error("Transaction rollback failed", e); + throw e; + } finally { + _currentTransaction.get().close(); + _currentTransaction.remove(); + } + } + + @Override + public Transaction current() { + return _currentTransaction.get(); + } +} + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectLocked.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectLocked.java new file mode 100644 index 00000000..ac3a856c --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectLocked.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.TransactionObject; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; + +import java.util.Optional; + +public record TransactionObjectLocked + (Optional data, AutoCloseableNoThrow lock) + implements TransactionObject { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectNoLock.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectNoLock.java new file mode 100644 index 00000000..7672d09a --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectNoLock.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.TransactionObject; + +import java.util.Optional; + +public record TransactionObjectNoLock + (Optional data) + implements TransactionObject { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxCommitException.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxCommitException.java new file mode 100644 index 00000000..73e488d6 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxCommitException.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects; + +public class 
TxCommitException extends RuntimeException { + public TxCommitException(String message) { + super(message); + } + + public TxCommitException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java new file mode 100644 index 00000000..2fb14558 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -0,0 +1,490 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.persistence.TxManifestObj; +import com.usatiuk.dhfs.objects.transaction.TxRecord; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.PSortedMap; +import org.pcollections.TreePMap; + +import javax.annotation.Nonnull; +import java.util.*; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +@ApplicationScoped +public class WritebackObjectPersistentStore { + private final LinkedList _pendingBundles = new LinkedList<>(); + + private final AtomicReference> _pendingWrites = new AtomicReference<>(TreePMap.empty()); + private final ReentrantReadWriteLock _pendingWritesVersionLock = new ReentrantReadWriteLock(); + private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); + + private final Object _flushWaitSynchronizer = new Object(); + private final AtomicLong _lastWrittenTx = new AtomicLong(-1); + private final AtomicLong _counter = new AtomicLong(); + private final AtomicLong _lastCommittedTx = new AtomicLong(-1); + private final AtomicLong _waitedTotal = new AtomicLong(0); + @Inject + CachingObjectPersistentStore cachedStore; + @ConfigProperty(name = "dhfs.objects.writeback.limit") + long sizeLimit; + private long currentSize = 0; + private ExecutorService _writebackExecutor; + private ExecutorService _statusExecutor; + private volatile boolean _ready = false; + + void init(@Observes @Priority(110) StartupEvent event) { + { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("tx-writeback-%d") + .build(); + + _writebackExecutor = Executors.newSingleThreadExecutor(factory); + _writebackExecutor.submit(this::writeback); + } + + _statusExecutor = Executors.newSingleThreadExecutor(); + _statusExecutor.submit(() -> { + try { + while (true) { + Thread.sleep(1000); + if (currentSize > 0) + Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB"); + } + } catch (InterruptedException ignored) { + } + }); + _counter.set(cachedStore.getLastTxId()); + _lastCommittedTx.set(cachedStore.getLastTxId()); + _ready = true; + } + + void 
shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException { + Log.info("Waiting for all transactions to drain"); + + synchronized (_flushWaitSynchronizer) { + _ready = false; + while (currentSize > 0) { + _flushWaitSynchronizer.wait(); + } + } + + _writebackExecutor.shutdownNow(); + Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms"); + } + + private void verifyReady() { + if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!"); + } + + private void writeback() { + while (!Thread.interrupted()) { + try { + TxBundle bundle = new TxBundle(0); + synchronized (_pendingBundles) { + while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) + _pendingBundles.wait(); + + long diff = 0; + while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var toCompress = _pendingBundles.poll(); + diff -= toCompress.calculateTotalSize(); + bundle.compress(toCompress); + } + diff += bundle.calculateTotalSize(); + synchronized (_flushWaitSynchronizer) { + currentSize += diff; + } + } + + var toWrite = new ArrayList>(); + var toDelete = new ArrayList(); + + for (var e : bundle._entries.values()) { + switch (e) { + case TxBundle.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> { + Log.trace("Writing new " + key); + toWrite.add(Pair.of(key, data)); + } + case TxBundle.DeletedEntry(JObjectKey key) -> { + Log.trace("Deleting from persistent storage " + key); + toDelete.add(key); + } + default -> throw new IllegalStateException("Unexpected value: " + e); + } + } + + cachedStore.commitTx( + new TxManifestObj<>( + Collections.unmodifiableList(toWrite), + Collections.unmodifiableList(toDelete) + ), bundle.getId()); + + Log.trace("Bundle " + bundle.getId() + " committed"); + + // Remove from pending writes, after real commit + // As we are the only writers to _pendingWrites, no need to synchronize with iterator creation + // if they get the older version, as it will still contain all the new changes + synchronized (_pendingBundles) { + var curPw = _pendingWrites.get(); + for (var e : bundle._entries.values()) { + var cur = curPw.get(e.key()); + if (cur.bundleId() <= bundle.getId()) + curPw = curPw.minus(e.key()); + } + _pendingWrites.set(curPw); + // No need to increment version + } + + List> callbacks = new ArrayList<>(); + synchronized (_notFlushedBundles) { + _lastWrittenTx.set(bundle.getId()); + while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { + callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); + } + } + callbacks.forEach(l -> l.forEach(Runnable::run)); + + synchronized (_flushWaitSynchronizer) { + currentSize -= bundle.calculateTotalSize(); + // FIXME: + if (currentSize <= sizeLimit || !_ready) + _flushWaitSynchronizer.notifyAll(); + } + } catch (InterruptedException ignored) { + } catch (Exception e) { + Log.error("Uncaught exception in writeback", e); + } catch (Throwable o) { + Log.error("Uncaught THROWABLE in writeback", o); + } + } + Log.info("Writeback thread exiting"); + } + + + public TxBundle createBundle() { + verifyReady(); + boolean wait = false; + while (true) { + if (wait) { + synchronized (_flushWaitSynchronizer) { + long started = System.currentTimeMillis(); + while (currentSize > sizeLimit) { + try { + _flushWaitSynchronizer.wait(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + long waited = System.currentTimeMillis() - started; + _waitedTotal.addAndGet(waited); + 
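// Note: this wait is the write-back back-pressure point: bundle creation blocks
// while the pending write-back size exceeds dhfs.objects.writeback.limit, and the
// time spent blocked is tallied into _waitedTotal for the shutdown statistics log.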
if (Log.isTraceEnabled()) + Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms"); + wait = false; + } + } + synchronized (_pendingBundles) { + synchronized (_flushWaitSynchronizer) { + if (currentSize > sizeLimit) { + if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var target = _pendingBundles.poll(); + + long diff = -target.calculateTotalSize(); + while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var toCompress = _pendingBundles.poll(); + diff -= toCompress.calculateTotalSize(); + target.compress(toCompress); + } + diff += target.calculateTotalSize(); + currentSize += diff; + _pendingBundles.addFirst(target); + } + } + + if (currentSize > sizeLimit) { + wait = true; + continue; + } + } + synchronized (_notFlushedBundles) { + var bundle = new TxBundle(_counter.incrementAndGet()); + _pendingBundles.addLast(bundle); + _notFlushedBundles.put(bundle.getId(), bundle); + return bundle; + } + } + } + } + + public void commitBundle(TxBundle bundle) { + verifyReady(); + _pendingWritesVersionLock.writeLock().lock(); + try { + synchronized (_pendingBundles) { + var curPw = _pendingWrites.get(); + for (var e : ((TxBundle) bundle)._entries.values()) { + switch (e) { + case TxBundle.CommittedEntry c -> { + curPw = curPw.plus(c.key(), new PendingWrite(c.data, bundle.getId())); + } + case TxBundle.DeletedEntry d -> { + curPw = curPw.plus(d.key(), new PendingDelete(d.key, bundle.getId())); + } + default -> throw new IllegalStateException("Unexpected value: " + e); + } + } + // Now, make the changes visible to new iterators + _pendingWrites.set(curPw); + ((TxBundle) bundle).setReady(); + if (_pendingBundles.peek() == bundle) + _pendingBundles.notify(); + synchronized (_flushWaitSynchronizer) { + currentSize += ((TxBundle) bundle).calculateTotalSize(); + } + } + assert bundle.getId() > _lastCommittedTx.get(); + _lastCommittedTx.set(bundle.getId()); + } finally { + _pendingWritesVersionLock.writeLock().unlock(); + } + } + + public void dropBundle(TxBundle bundle) { + verifyReady(); + synchronized (_pendingBundles) { + Log.warn("Dropped bundle: " + bundle); + _pendingBundles.remove((TxBundle) bundle); + synchronized (_flushWaitSynchronizer) { + currentSize -= ((TxBundle) bundle).calculateTotalSize(); + } + } + } + + public void fence(long bundleId) { + var latch = new CountDownLatch(1); + asyncFence(bundleId, latch::countDown); + try { + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + public void asyncFence(long bundleId, Runnable fn) { + verifyReady(); + if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!"); + if (_lastWrittenTx.get() >= bundleId) { + fn.run(); + return; + } + synchronized (_notFlushedBundles) { + if (_lastWrittenTx.get() >= bundleId) { + fn.run(); + return; + } + _notFlushedBundles.get(bundleId).addCallback(fn); + } + } + + private static class TxBundle { + private final LinkedHashMap _entries = new LinkedHashMap<>(); + private final ArrayList _callbacks = new ArrayList<>(); + private long _txId; + private volatile boolean _ready = false; + private long _size = -1; + private boolean _wasCommitted = false; + + private TxBundle(long txId) { + _txId = txId; + } + + public long getId() { + return _txId; + } + + public void setReady() { + _ready = true; + } + + public void addCallback(Runnable callback) { + synchronized (_callbacks) { + if (_wasCommitted) throw new IllegalStateException(); + _callbacks.add(callback); + } + } + + public 
List setCommitted() { + synchronized (_callbacks) { + _wasCommitted = true; + return Collections.unmodifiableList(_callbacks); + } + } + + public void commit(JDataVersionedWrapper obj) { + synchronized (_entries) { + _entries.put(obj.data().key(), new CommittedEntry(obj.data().key(), obj, obj.data().estimateSize())); + } + } + + public void delete(JObjectKey obj) { + synchronized (_entries) { + _entries.put(obj, new DeletedEntry(obj)); + } + } + + public long calculateTotalSize() { + if (_size >= 0) return _size; + _size = _entries.values().stream().mapToInt(BundleEntry::size).sum(); + return _size; + } + + public void compress(TxBundle other) { + if (_txId >= other._txId) + throw new IllegalArgumentException("Compressing an older bundle into newer"); + + _txId = other._txId; + _size = -1; + + _entries.putAll(other._entries); + } + + private interface BundleEntry { + JObjectKey key(); + + int size(); + } + + private record CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) + implements BundleEntry { + } + + private record DeletedEntry(JObjectKey key) + implements BundleEntry { + + public int size() { + return 64; + } + } + } + + public Optional getPendingWrite(JObjectKey key) { + synchronized (_pendingBundles) { + return Optional.ofNullable(_pendingWrites.get().get(key)); + } + } + + @Nonnull + public Optional readObject(JObjectKey name) { + var pending = getPendingWrite(name).orElse(null); + return switch (pending) { + case PendingWrite write -> Optional.of(write.data()); + case PendingDelete ignored -> Optional.empty(); + case null -> cachedStore.readObject(name); + default -> throw new IllegalStateException("Unexpected value: " + pending); + }; + } + + public interface VerboseReadResult { + } + + public record VerboseReadResultPersisted(Optional data) implements VerboseReadResult { + } + + public record VerboseReadResultPending(PendingWriteEntry pending) implements VerboseReadResult { + } + + @Nonnull + public VerboseReadResult readObjectVerbose(JObjectKey key) { + var pending = getPendingWrite(key).orElse(null); + if (pending != null) { + return new VerboseReadResultPending(pending); + } + return new VerboseReadResultPersisted(cachedStore.readObject(key)); + } + + /** + * @param commitLocked - a function that will be called with a Consumer of a new transaction id, + * that will commit the transaction the changes in the store will be visible to new transactions + * only after the runnable is called + */ + public Consumer commitTx(Collection> writes, BiConsumer commitLocked) { + var bundle = createBundle(); + long bundleId = bundle.getId(); + try { + for (var action : writes) { + switch (action) { + case TxRecord.TxObjectRecordWrite write -> { + Log.trace("Flushing object " + write.key()); + bundle.commit(new JDataVersionedWrapper(write.data(), bundleId)); + } + case TxRecord.TxObjectRecordDeleted deleted -> { + Log.trace("Deleting object " + deleted.key()); + bundle.delete(deleted.key()); + } + default -> { + throw new TxCommitException("Unexpected value: " + action.key()); + } + } + } + } catch (Throwable t) { + dropBundle(bundle); + throw new TxCommitException(t.getMessage(), t); + } + + + Log.tracef("Committing transaction %d to storage", bundleId); + commitLocked.accept(bundleId, () -> { + commitBundle(bundle); + }); + + return r -> asyncFence(bundleId, r); + } + + // Returns an iterator with a view of all commited objects + // Does not have to guarantee consistent view, snapshots are handled by upper layers + // Invalidated by commitBundle, but might return data 
after it has been really committed + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + Log.tracev("Getting writeback iterator: {0}, {1}", start, key); + _pendingWritesVersionLock.readLock().lock(); + try { + var curPending = _pendingWrites.get(); + return new TombstoneMergingKvIterator<>("writeback-ps", start, key, + (tS, tK) -> new MappingKvIterator<>( + new NavigableMapKvIterator<>(curPending, tS, tK), + e -> switch (e) { + case PendingWrite pw -> new Data<>(pw.data()); + case PendingDelete d -> new Tombstone<>(); + default -> throw new IllegalStateException("Unexpected value: " + e); + }), + (tS, tK) -> cachedStore.getIterator(tS, tK)); + } finally { + _pendingWritesVersionLock.readLock().unlock(); + } + } + + public long getLastTxId() { + _pendingWritesVersionLock.readLock().lock(); + try { + return _lastCommittedTx.get(); + } finally { + _pendingWritesVersionLock.readLock().unlock(); + } + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java new file mode 100644 index 00000000..c3bd22dd --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -0,0 +1,231 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.utils.DataLocker; +import io.quarkus.logging.Log; +import io.quarkus.runtime.Startup; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.TreePMap; + +import javax.annotation.Nonnull; +import java.util.LinkedHashMap; +import java.util.Optional; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +@ApplicationScoped +public class CachingObjectPersistentStore { + private final LinkedHashMap _cache = new LinkedHashMap<>(8, 0.75f, true); + private TreePMap _sortedCache = TreePMap.empty(); + private long _cacheVersion = 0; + + private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); + private final DataLocker _locker = new DataLocker(); + + @Inject + SerializingObjectPersistentStore delegate; + @ConfigProperty(name = "dhfs.objects.lru.limit") + long sizeLimit; + @ConfigProperty(name = "dhfs.objects.lru.print-stats") + boolean printStats; + + private long _curSize = 0; + private long _evict = 0; + + private ExecutorService _statusExecutor = null; + + @Startup + void init() { + if (printStats) { + _statusExecutor = Executors.newSingleThreadExecutor(); + _statusExecutor.submit(() -> { + try { + while (true) { + Thread.sleep(10000); + if (_curSize > 0) + Log.info("Cache status: size=" + _curSize / 1024 / 1024 + "MB" + " evicted=" + _evict); + _evict = 0; + } + } catch (InterruptedException ignored) { + } + }); + } + } + + private void put(JObjectKey key, Optional obj) { +// Log.tracev("Adding {0} to cache: {1}", key, obj); + _lock.writeLock().lock(); + try { + int size = obj.map(o -> o.data().estimateSize()).orElse(16); + + _curSize += size; + var entry = new CacheEntry(obj.>map(Data::new).orElse(new Tombstone<>()), size); + var old = _cache.putLast(key, entry); + + _sortedCache = _sortedCache.plus(key, entry); + if (old != null) + _curSize -= old.size(); + + while (_curSize >= 
sizeLimit) { + var del = _cache.pollFirstEntry(); + _sortedCache = _sortedCache.minus(del.getKey()); + _curSize -= del.getValue().size(); + _evict++; + } + } finally { + _lock.writeLock().unlock(); + } + } + + @Nonnull + public Optional readObject(JObjectKey name) { + try (var lock = _locker.lock(name)) { + _lock.readLock().lock(); + try { + var got = _cache.get(name); + if (got != null) { + return got.object().opt(); + } + } finally { + _lock.readLock().unlock(); + } + + // TODO: This is possibly racy +// var got = delegate.readObject(name); +// put(name, got); + return delegate.readObject(name); + } + } + + public void commitTx(TxManifestObj names, long txId) { + var serialized = delegate.prepareManifest(names); + Log.tracev("Committing: {0} writes, {1} deletes", names.written().size(), names.deleted().size()); + delegate.commitTx(serialized, txId, (commit) -> { + _lock.writeLock().lock(); + try { + // Make the changes visible atomically both in cache and in the underlying store + for (var write : names.written()) { + put(write.getLeft(), Optional.of(write.getRight())); + } + for (var del : names.deleted()) { + put(del, Optional.empty()); + } + ++_cacheVersion; + commit.run(); + } finally { + _lock.writeLock().unlock(); + } + }); + Log.tracev("Committed: {0} writes, {1} deletes", names.written().size(), names.deleted().size()); + } + + + private class CachingKvIterator implements CloseableKvIterator { + private final CloseableKvIterator _delegate; + // This should be created under lock + private final long _curCacheVersion = _cacheVersion; + + private CachingKvIterator(CloseableKvIterator delegate) { + _delegate = delegate; + } + + @Override + public JObjectKey peekNextKey() { + return _delegate.peekNextKey(); + } + + @Override + public void skip() { + _delegate.skip(); + } + + @Override + public void close() { + _delegate.close(); + } + + @Override + public boolean hasNext() { + return _delegate.hasNext(); + } + + @Override + public JObjectKey peekPrevKey() { + return _delegate.peekPrevKey(); + } + + private void maybeCache(Pair prev) { + _lock.writeLock().lock(); + try { + if (_cacheVersion != _curCacheVersion) { + Log.tracev("Not caching: {0}", prev); + } else { + Log.tracev("Caching: {0}", prev); + put(prev.getKey(), Optional.of(prev.getValue())); + } + } finally { + _lock.writeLock().unlock(); + } + } + + @Override + public Pair prev() { + var prev = _delegate.prev(); + maybeCache(prev); + return prev; + } + + @Override + public boolean hasPrev() { + return _delegate.hasPrev(); + } + + @Override + public void skipPrev() { + _delegate.skipPrev(); + } + + @Override + public Pair next() { + var next = _delegate.next(); + maybeCache(next); + return next; + } + } + + // Returns an iterator with a view of all commited objects + // Does not have to guarantee consistent view, snapshots are handled by upper layers + // Warning: it has a nasty side effect of global caching, so in this case don't even call next on it, + // if some objects are still in writeback + public CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key) { + _lock.readLock().lock(); + try { + Log.tracev("Getting cache iterator: {0}, {1}", start, key); + var curSortedCache = _sortedCache; + return new MergingKvIterator<>("cache", start, key, + (mS, mK) + -> new MappingKvIterator<>( + new NavigableMapKvIterator<>(curSortedCache, mS, mK), + e -> { + Log.tracev("Taken from cache: {0}", e); + return e.object(); + } + ), + (mS, mK) + -> new MappingKvIterator<>(new CachingKvIterator(delegate.getIterator(mS, mK)), 
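maybeCache() above is an optimistic guard: a value read from the delegate iterator is only inserted into the cache if _cacheVersion has not moved since the iterator was created, because an intervening commitTx() could have made the read stale. The same pattern in isolation, with illustrative names only:

    class VersionGuardedCache<K, V> {
        private long _epoch = 0;
        private final Map<K, V> _map = new HashMap<>();

        synchronized long epoch() { return _epoch; }      // read before iterating
        synchronized void invalidate() { _epoch++; }      // called on every commit
        synchronized void putIfEpoch(long readEpoch, K k, V v) {
            if (readEpoch == _epoch) _map.put(k, v);      // else: possibly stale, drop
        }
    }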
Data::new)); + } finally { + _lock.readLock().unlock(); + } + } + + private record CacheEntry(MaybeTombstone object, long size) { + } + + public long getLastTxId() { + return delegate.getLastCommitId(); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java new file mode 100644 index 00000000..338c025f --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java @@ -0,0 +1,8 @@ +package com.usatiuk.dhfs.objects.persistence; + +public enum IteratorStart { + LT, + LE, + GT, + GE, +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java new file mode 100644 index 00000000..080b51ab --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java @@ -0,0 +1,337 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.CloseableKvIterator; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.KeyPredicateKvIterator; +import com.usatiuk.dhfs.objects.ReversibleKvIterator; +import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; +import io.quarkus.arc.properties.IfBuildProperty; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import org.apache.commons.lang3.mutable.MutableObject; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.lmdbjava.*; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.lang.ref.Cleaner; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.*; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; + +import static org.lmdbjava.DbiFlags.MDB_CREATE; +import static org.lmdbjava.Env.create; + +@ApplicationScoped +@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "lmdb") +public class LmdbObjectPersistentStore implements ObjectPersistentStore { + private final Path _root; + private Env _env; + private Dbi _db; + private boolean _ready = false; + + private long _lastTxId = 0; + + private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); + + private static final String DB_NAME = "objects"; + private static final byte[] DB_VER_OBJ_NAME = "__DB_VER_OBJ".getBytes(StandardCharsets.UTF_8); + + public LmdbObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) { + _root = Path.of(root).resolve("objects"); + } + + void init(@Observes @Priority(100) StartupEvent event) throws IOException { + if (!_root.toFile().exists()) { + Log.info("Initializing with root " + _root); + _root.toFile().mkdirs(); + } + _env = create() + .setMapSize(1_000_000_000_000L) + .setMaxDbs(1) + .open(_root.toFile(), EnvFlags.MDB_NOTLS); + _db = _env.openDbi(DB_NAME, MDB_CREATE); + + var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length); + bb.put(DB_VER_OBJ_NAME); + bb.flip(); + + try (Txn txn = _env.txnRead()) { + var value = _db.get(txn, 
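The IteratorStart variants introduced here correspond one-to-one with NavigableMap navigation, which is a handy way to remember their semantics. With keys {1, 3, 5} and anchor 3 (plain JDK calls, shown for reference):

    NavigableMap<Integer, String> m = new TreeMap<>(Map.of(1, "a", 3, "b", 5, "c"));
    m.lowerKey(3);    // 1 -> like IteratorStart.LT: strictly before the anchor
    m.floorKey(3);    // 3 -> like IteratorStart.LE: at the anchor, or before
    m.higherKey(3);   // 5 -> like IteratorStart.GT: strictly after the anchor
    m.ceilingKey(3);  // 3 -> like IteratorStart.GE: at the anchor, or after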
bb); + if (value != null) { + var ver = value.getLong(); + Log.infov("Read version: {0}", ver); + _lastTxId = ver; + } + } + + _ready = true; + } + + void shutdown(@Observes @Priority(900) ShutdownEvent event) throws IOException { + _ready = false; + _db.close(); + _env.close(); + } + + private void verifyReady() { + if (!_ready) throw new IllegalStateException("Wrong service order!"); + } + + @Nonnull + @Override + public Collection findAllObjects() { +// try (Txn txn = env.txnRead()) { +// try (var cursor = db.openCursor(txn)) { +// var keys = List.of(); +// while (cursor.next()) { +// keys.add(JObjectKey.fromBytes(cursor.key())); +// } +// return keys; +// } +// } + return List.of(); + } + + + @Nonnull + @Override + public Optional readObject(JObjectKey name) { + verifyReady(); + try (Txn txn = _env.txnRead()) { + var value = _db.get(txn, name.toByteBuffer()); + return Optional.ofNullable(value).map(ByteString::copyFrom); + } + } + + private class LmdbKvIterator extends ReversibleKvIterator { + private final Txn _txn = _env.txnRead(); + private final Cursor _cursor = _db.openCursor(_txn); + private boolean _hasNext = false; + + private static final Cleaner CLEANER = Cleaner.create(); + private final MutableObject _closed = new MutableObject<>(false); + private final Exception _allocationStacktrace = new Exception(); + + LmdbKvIterator(IteratorStart start, JObjectKey key) { + _goingForward = true; + var closedRef = _closed; + var bt = _allocationStacktrace; + CLEANER.register(this, () -> { + if (!closedRef.getValue()) { + Log.error("Iterator was not closed before GC, allocated at: {0}", bt); + System.exit(-1); + } + }); + + verifyReady(); + if (!_cursor.get(key.toByteBuffer(), GetOp.MDB_SET_RANGE)) { + return; + } + + var got = JObjectKey.fromByteBuffer(_cursor.key()); + _cursor.key().flip(); + var cmp = got.compareTo(key); + + assert cmp >= 0; + + _hasNext = true; + + if (cmp == 0) { + switch (start) { + case LT -> { + _hasNext = _cursor.prev(); + if (!_hasNext) { + _hasNext = _cursor.first(); + } + } + case GT -> { + _hasNext = _cursor.next(); + } + case LE, GE -> { + } + } + } else { + switch (start) { + case LT, LE -> { + _hasNext = _cursor.prev(); + if (!_hasNext) { + _hasNext = _cursor.first(); + } + } + case GT, GE -> { + } + } + } + + var realGot = JObjectKey.fromByteBuffer(_cursor.key()); + _cursor.key().flip(); + + switch (start) { + case LT -> { +// assert !_hasNext || realGot.compareTo(key) < 0; + } + case LE -> { +// assert !_hasNext || realGot.compareTo(key) <= 0; + } + case GT -> { + assert !_hasNext || realGot.compareTo(key) > 0; + } + case GE -> { + assert !_hasNext || realGot.compareTo(key) >= 0; + } + } + Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext); + } + + @Override + public void close() { + if (_closed.getValue()) { + return; + } + _closed.setValue(true); + _cursor.close(); + _txn.close(); + } + + @Override + protected void reverse() { + if (_hasNext) { + if (_goingForward) { + _hasNext = _cursor.prev(); + } else { + _hasNext = _cursor.next(); + } + } else { + if (_goingForward) { + _hasNext = _cursor.last(); + } else { + _hasNext = _cursor.first(); + } + } + _goingForward = !_goingForward; + } + + @Override + protected JObjectKey peekImpl() { + if (!_hasNext) { + throw new NoSuchElementException("No more elements"); + } + var ret = JObjectKey.fromByteBuffer(_cursor.key()); + _cursor.key().flip(); + return ret; + } + + @Override + protected void skipImpl() { + if (_goingForward) + _hasNext = _cursor.next(); + else + _hasNext = _cursor.prev(); + } + + 
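The Cleaner registration at the top of LmdbKvIterator is a leak detector: if an iterator is garbage-collected without close(), the LMDB cursor and read transaction would otherwise leak silently. The pattern in isolation (a sketch, not the diff's code; note the cleanup callback must capture fields, never `this`):

    import java.lang.ref.Cleaner;
    import java.util.concurrent.atomic.AtomicBoolean;

    final class LeakChecked implements AutoCloseable {
        private static final Cleaner CLEANER = Cleaner.create();
        private final AtomicBoolean _closed = new AtomicBoolean(false);

        LeakChecked() {
            var closedRef = _closed;                   // capture the field, not `this`
            var allocatedAt = new Exception("allocated here");
            CLEANER.register(this, () -> {
                if (!closedRef.get())
                    allocatedAt.printStackTrace();     // close() was never called
            });
        }

        @Override
        public void close() { _closed.set(true); }
    }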
@Override + protected boolean hasImpl() { + return _hasNext; + } + + @Override + protected Pair nextImpl() { + if (!_hasNext) { + throw new NoSuchElementException("No more elements"); + } + var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), ByteString.copyFrom(_cursor.val())); + if (_goingForward) + _hasNext = _cursor.next(); + else + _hasNext = _cursor.prev(); + Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext); + return ret; + } + } + + @Override + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new KeyPredicateKvIterator<>(new LmdbKvIterator(start, key), start, key, (k) -> !Arrays.equals(k.name().getBytes(StandardCharsets.UTF_8), DB_VER_OBJ_NAME)); + } + + @Override + public void commitTx(TxManifestRaw names, long txId, Consumer commitLocked) { + verifyReady(); + try (Txn txn = _env.txnWrite()) { + for (var written : names.written()) { + // TODO: + var bb = UninitializedByteBuffer.allocateUninitialized(written.getValue().size()); + bb.put(written.getValue().asReadOnlyByteBuffer()); + bb.flip(); + _db.put(txn, written.getKey().toByteBuffer(), bb); + } + for (JObjectKey key : names.deleted()) { + _db.delete(txn, key.toByteBuffer()); + } + + var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length); + bb.put(DB_VER_OBJ_NAME); + bb.flip(); + var bbData = ByteBuffer.allocateDirect(8); + + commitLocked.accept(() -> { + _lock.writeLock().lock(); + try { + var realTxId = txId; + if (realTxId == -1) + realTxId = _lastTxId + 1; + + assert realTxId > _lastTxId; + _lastTxId = realTxId; + + bbData.putLong(realTxId); + bbData.flip(); + _db.put(txn, bb, bbData); + + txn.commit(); + } finally { + _lock.writeLock().unlock(); + } + }); + } + } + + @Override + public long getTotalSpace() { + verifyReady(); + return _root.toFile().getTotalSpace(); + } + + @Override + public long getFreeSpace() { + verifyReady(); + return _root.toFile().getFreeSpace(); + } + + @Override + public long getUsableSpace() { + verifyReady(); + return _root.toFile().getUsableSpace(); + } + + @Override + public long getLastCommitId() { + _lock.readLock().lock(); + try { + return _lastTxId; + } finally { + _lock.readLock().unlock(); + } + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java new file mode 100644 index 00000000..0cf640bf --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java @@ -0,0 +1,90 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.CloseableKvIterator; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.NavigableMapKvIterator; +import io.quarkus.arc.properties.IfBuildProperty; +import jakarta.enterprise.context.ApplicationScoped; + +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.Optional; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; + +@ApplicationScoped +@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory") +public class MemoryObjectPersistentStore implements ObjectPersistentStore { + private final ConcurrentSkipListMap _objects = new ConcurrentSkipListMap<>(); + private long _lastCommitId = 0; + private final ReentrantReadWriteLock _lock = new 
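Note how commitTx() above writes the new transaction id under the reserved __DB_VER_OBJ key inside the same LMDB write transaction as the object data, so after a crash the recorded id and the data can never disagree. The invariant reduced to a sketch over a stand-in key-value API (KvTxn is hypothetical, not lmdbjava's):

    interface KvTxn {
        void put(String key, byte[] value);
        void commit();
    }

    static void commitAtomically(KvTxn txn, Map<String, byte[]> writes, long txId) {
        writes.forEach(txn::put);                               // payload
        txn.put("__DB_VER_OBJ",
                ByteBuffer.allocate(8).putLong(txId).array());  // commit marker
        txn.commit();   // crash-atomic: marker and payload land together or not at all
    }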
ReentrantReadWriteLock(); + + @Nonnull + @Override + public Collection findAllObjects() { + synchronized (this) { + return _objects.keySet(); + } + } + + @Nonnull + @Override + public Optional readObject(JObjectKey name) { + synchronized (this) { + return Optional.ofNullable(_objects.get(name)); + } + } + + @Override + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new NavigableMapKvIterator<>(_objects, start, key); + } + + @Override + public void commitTx(TxManifestRaw names, long txId, Consumer commitLocked) { + synchronized (this) { + for (var written : names.written()) { + _objects.put(written.getKey(), written.getValue()); + } + for (JObjectKey key : names.deleted()) { + _objects.remove(key); + } + commitLocked.accept(() -> { + _lock.writeLock().lock(); + try { + assert txId > _lastCommitId; + _lastCommitId = txId; + } finally { + _lock.writeLock().unlock(); + } + }); + } + } + + @Override + public long getTotalSpace() { + return 0; + } + + @Override + public long getFreeSpace() { + return 0; + } + + @Override + public long getUsableSpace() { + return 0; + } + + @Override + public long getLastCommitId() { + _lock.readLock().lock(); + try { + return _lastCommitId; + } finally { + _lock.readLock().unlock(); + } + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java new file mode 100644 index 00000000..bcb08401 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java @@ -0,0 +1,38 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.CloseableKvIterator; +import com.usatiuk.dhfs.objects.JObjectKey; + +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.Optional; +import java.util.function.Consumer; + +// Persistent storage of objects +// All changes are written as sequential transactions +public interface ObjectPersistentStore { + @Nonnull + Collection findAllObjects(); + + @Nonnull + Optional readObject(JObjectKey name); + + // Returns an iterator with a view of all commited objects + // Does not have to guarantee consistent view, snapshots are handled by upper layers + CloseableKvIterator getIterator(IteratorStart start, JObjectKey key); + + /** + * @param commitLocked - a function that will be called with a Runnable that will commit the transaction + * the changes in the store will be visible to new transactions only after the runnable is called + */ + void commitTx(TxManifestRaw names, long txId, Consumer commitLocked); + + long getTotalSpace(); + + long getFreeSpace(); + + long getUsableSpace(); + + long getLastCommitId(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java new file mode 100644 index 00000000..f439731e --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java @@ -0,0 +1,56 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.usatiuk.dhfs.objects.*; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; + +import javax.annotation.Nonnull; +import java.util.Collection; +import 
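Both persistent stores implement the same two-phase commitTx shape: the store performs the write, then hands the caller a Runnable that finalizes it, and the caller runs that Runnable while holding whatever lock guards visibility (this is what CachingObjectPersistentStore relies on to update its cache atomically with the store). Caller-side sketch; the lock and the publish step are assumptions:

    store.commitTx(manifest, txId, commit -> {
        visibilityLock.writeLock().lock();
        try {
            commit.run();          // records txId inside the store's own transaction
            publishNewVersion();   // hypothetical: flip in-memory state to the new tx
        } finally {
            visibilityLock.writeLock().unlock();
        }
    });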
java.util.Optional; +import java.util.function.Consumer; + +@ApplicationScoped +public class SerializingObjectPersistentStore { + @Inject + ObjectSerializer serializer; + + @Inject + ObjectPersistentStore delegateStore; + + @Nonnull + Collection findAllObjects() { + return delegateStore.findAllObjects(); + } + + @Nonnull + Optional readObject(JObjectKey name) { + return delegateStore.readObject(name).map(serializer::deserialize); + } + + // Returns an iterator with a view of all commited objects + // Does not have to guarantee consistent view, snapshots are handled by upper layers + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new MappingKvIterator<>(delegateStore.getIterator(start, key), d -> serializer.deserialize(d)); + } + + public TxManifestRaw prepareManifest(TxManifestObj names) { + return new TxManifestRaw( + names.written().stream() + .map(e -> Pair.of(e.getKey(), serializer.serialize(e.getValue()))) + .toList() + , names.deleted()); + } + +// void commitTx(TxManifestObj names, Consumer commitLocked) { +// delegateStore.commitTx(prepareManifest(names), commitLocked); +// } + + void commitTx(TxManifestRaw names, long txId, Consumer commitLocked) { + delegateStore.commitTx(names, txId, commitLocked); + } + + long getLastCommitId() { + return delegateStore.getLastCommitId(); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestObj.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestObj.java new file mode 100644 index 00000000..19bc6e36 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestObj.java @@ -0,0 +1,12 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.usatiuk.dhfs.objects.JObjectKey; +import org.apache.commons.lang3.tuple.Pair; + +import java.io.Serializable; +import java.util.Collection; + +// FIXME: Serializable +public record TxManifestObj(Collection> written, + Collection deleted) implements Serializable { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestRaw.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestRaw.java new file mode 100644 index 00000000..fd7ec742 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestRaw.java @@ -0,0 +1,13 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.JObjectKey; +import org.apache.commons.lang3.tuple.Pair; + +import java.io.Serializable; +import java.util.Collection; + +// FIXME: Serializable +public record TxManifestRaw(Collection> written, + Collection deleted) implements Serializable { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java new file mode 100644 index 00000000..e783a2cf --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.objects.snapshot; + +public interface SnapshotEntry { + long whenToRemove(); + + SnapshotEntry withWhenToRemove(long whenToRemove); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java new file mode 100644 index 00000000..71113d45 --- 
/dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java @@ -0,0 +1,8 @@ +package com.usatiuk.dhfs.objects.snapshot; + +public record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry { + @Override + public SnapshotEntryDeleted withWhenToRemove(long whenToRemove) { + return new SnapshotEntryDeleted(whenToRemove); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java new file mode 100644 index 00000000..98cfbefc --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects.snapshot; + +import com.usatiuk.dhfs.objects.JDataVersionedWrapper; + +public record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { + @Override + public SnapshotEntryObject withWhenToRemove(long whenToRemove) { + return new SnapshotEntryObject(data, whenToRemove); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java new file mode 100644 index 00000000..dd8a5f07 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java @@ -0,0 +1,15 @@ +package com.usatiuk.dhfs.objects.snapshot; + +import com.usatiuk.dhfs.objects.JObjectKey; + +import javax.annotation.Nonnull; +import java.util.Comparator; + +public record SnapshotKey(JObjectKey key, long version) implements Comparable { + @Override + public int compareTo(@Nonnull SnapshotKey o) { + return Comparator.comparing(SnapshotKey::key) + .thenComparing(SnapshotKey::version) + .compare(this, o); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java new file mode 100644 index 00000000..1d045665 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java @@ -0,0 +1,185 @@ +package com.usatiuk.dhfs.objects.snapshot; + +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.NavigableMap; +import java.util.NoSuchElementException; +import java.util.Optional; + +// TODO: test me +public class SnapshotKvIterator extends ReversibleKvIterator> { + private final NavigableMap _objects; + private final long _version; + private final CloseableKvIterator _backing; + private Pair> _next = null; + + public SnapshotKvIterator(NavigableMap objects, long version, IteratorStart start, JObjectKey startKey) { + _objects = objects; + _version = version; + _goingForward = true; + _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, Long.MIN_VALUE)); + fill(); + + boolean shouldGoBack = false; + if (start == IteratorStart.LE) { + if (_next == null || _next.getKey().compareTo(startKey) > 0) { + shouldGoBack = true; + } + } else if (start == IteratorStart.LT) { + if (_next == null || _next.getKey().compareTo(startKey) >= 0) { + shouldGoBack = true; + } + } + + if (shouldGoBack && _backing.hasPrev()) { + _goingForward = false; + _backing.skipPrev(); + fill(); + _goingForward = true; + 
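SnapshotKey's comparator (key first, version second) keeps all versions of one object contiguous and ascending in the snapshot map, which is what the skip loops in SnapshotKvIterator rely on. A stand-alone illustration with stand-in types:

    record SKey(String key, long version) implements Comparable<SKey> {
        @Override
        public int compareTo(SKey o) {
            return Comparator.comparing(SKey::key)
                    .thenComparingLong(SKey::version)
                    .compare(this, o);
        }
    }
    // Sorted order: (a,1) < (a,5) < (b,0). All versions of "a" are adjacent,
    // so "newest version of a visible at snapshot S" is a short in-order scan.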
_backing.skip(); + fill(); + } + + + switch (start) { + case LT -> { +// assert _next == null || _next.getKey().compareTo(startKey) < 0; + } + case LE -> { +// assert _next == null || _next.getKey().compareTo(startKey) <= 0; + } + case GT -> { + assert _next == null || _next.getKey().compareTo(startKey) > 0; + } + case GE -> { + assert _next == null || _next.getKey().compareTo(startKey) >= 0; + } + } + + } + + private void fillPrev(JObjectKey ltKey) { + if (ltKey != null) + while (_backing.hasPrev() && _backing.peekPrevKey().key().equals(ltKey)) { + Log.tracev("Snapshot skipping prev: {0}", _backing.peekPrevKey()); + _backing.skipPrev(); + } + + _next = null; + + while (_backing.hasPrev() && _next == null) { + var prev = _backing.prev(); + if (prev.getKey().version() <= _version && prev.getValue().whenToRemove() > _version) { + Log.tracev("Snapshot taking prev: {0}", prev); + _next = switch (prev.getValue()) { + case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> + Pair.of(prev.getKey().key(), new Data<>(data)); + case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(prev.getKey().key(), new Tombstone<>()); + default -> throw new IllegalStateException("Unexpected value: " + prev.getValue()); + }; + } + } + + if (_next != null) { + if (_next.getValue() instanceof Data( + JDataVersionedWrapper value + )) { + assert value.version() <= _version; + } + } + } + + private void fillNext() { + _next = null; + while (_backing.hasNext() && _next == null) { + var next = _backing.next(); + var nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null; + while (nextNextKey != null && nextNextKey.key().equals(next.getKey().key()) && nextNextKey.version() <= _version) { + Log.tracev("Snapshot skipping next: {0} (too old)", next); + next = _backing.next(); + nextNextKey = _backing.hasNext() ?
_backing.peekNextKey() : null; + } + // next.getValue().whenToRemove() >=_id, read tx might have same snapshot id as some write tx + if (next.getKey().version() <= _version && next.getValue().whenToRemove() > _version) { + _next = switch (next.getValue()) { + case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> + Pair.of(next.getKey().key(), new Data<>(data)); + case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(next.getKey().key(), new Tombstone<>()); + default -> throw new IllegalStateException("Unexpected value: " + next.getValue()); + }; + } + if (_next != null) { + if (_next.getValue() instanceof Data( + JDataVersionedWrapper value + )) { + assert value.version() <= _version; + } + } + } + } + + private void fill() { + if (_goingForward) + fillNext(); + else + fillPrev(Optional.ofNullable(_next).map(Pair::getKey).orElse(null)); + } + + @Override + protected void reverse() { + _goingForward = !_goingForward; + + boolean wasAtEnd = _next == null; + + if (_goingForward && !wasAtEnd) + _backing.skip(); + else if (!_goingForward && !wasAtEnd) + _backing.skipPrev(); + + fill(); + } + + @Override + public JObjectKey peekImpl() { + if (_next == null) + throw new NoSuchElementException(); + return _next.getKey(); + } + + @Override + public void skipImpl() { + if (_next == null) + throw new NoSuchElementException(); + fill(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasImpl() { + return _next != null; + } + + @Override + public Pair> nextImpl() { + if (_next == null) + throw new NoSuchElementException("No more elements"); + var ret = _next; + if (ret.getValue() instanceof Data( + JDataVersionedWrapper value + )) { + assert value.version() <= _version; + } + + fill(); + Log.tracev("Read: {0}, next: {1}", ret, _next); + return ret; + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java new file mode 100644 index 00000000..77b36f46 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java @@ -0,0 +1,342 @@ +package com.usatiuk.dhfs.objects.snapshot; + +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.transaction.TxRecord; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.mutable.MutableObject; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.TreePMap; + +import javax.annotation.Nonnull; +import java.lang.ref.Cleaner; +import java.util.*; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; + +@ApplicationScoped +public class SnapshotManager { + @Inject + WritebackObjectPersistentStore writebackStore; + + private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); + + @ConfigProperty(name = "dhfs.objects.persistence.snapshot-extra-checks") + boolean extraChecks; + + private long _lastSnapshotId = 0; + private long _lastAliveSnapshotId = -1; + + private final Queue _snapshotIds = new ArrayDeque<>(); + private TreePMap _objects = TreePMap.empty(); + private final TreeMap> _snapshotBounds = new TreeMap<>(); + private final HashMap _snapshotRefCounts = new 
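fillPrev() and fillNext() above gate entries with the same two-sided visibility test; pulled out for clarity (a restatement, not a method in the diff):

    // An entry written at `version` and superseded at `whenToRemove` is
    // visible to snapshot `s` exactly when it already existed and had not
    // yet been replaced at the time the snapshot was taken:
    static boolean visibleIn(long s, long version, long whenToRemove) {
        return version <= s && whenToRemove > s;
    }
    // Example: (version=3, whenToRemove=7) is visible to snapshots 3..6;
    // snapshot 7 instead sees the write that created the boundary at id 7.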
HashMap<>(); + + private void verify() { + assert _snapshotIds.isEmpty() == (_lastAliveSnapshotId == -1); + assert _snapshotIds.isEmpty() || _snapshotIds.peek() == _lastAliveSnapshotId; + } + + // This should not be called for the same objects concurrently + public Consumer commitTx(Collection> writes) { +// _lock.writeLock().lock(); +// try { +// if (!_snapshotIds.isEmpty()) { +// verify(); + HashMap newEntries = new HashMap<>(); + for (var action : writes) { + var current = writebackStore.readObjectVerbose(action.key()); + // Add to snapshot the previous visible version of the replaced object + // I.e. should be visible to all transactions with id <= id + // and at least as its corresponding version + Pair newSnapshotEntry = switch (current) { + case WritebackObjectPersistentStore.VerboseReadResultPersisted( + Optional data + ) -> Pair.of(new SnapshotKey(action.key(), data.map(JDataVersionedWrapper::version).orElse(-1L)), + data.map(o -> new SnapshotEntryObject(o, -1)).orElse(new SnapshotEntryDeleted(-1))); + case WritebackObjectPersistentStore.VerboseReadResultPending( + PendingWriteEntry pending + ) -> { + yield switch (pending) { + case PendingWrite write -> + Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data(), -1)); + case PendingDelete delete -> + Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted(-1)); + default -> throw new IllegalStateException("Unexpected value: " + pending); + }; + } + default -> throw new IllegalStateException("Unexpected value: " + current); + }; + + + Log.tracev("Adding snapshot entry {0}", newSnapshotEntry); + + newEntries.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight()); + } + + _lock.writeLock().lock(); + try { + return writebackStore.commitTx(writes, (id, commit) -> { + if (!_snapshotIds.isEmpty()) { + assert id > _lastSnapshotId; + for (var newSnapshotEntry : newEntries.entrySet()) { + assert newSnapshotEntry.getKey().version() < id; + var realNewSnapshotEntry = newSnapshotEntry.getValue().withWhenToRemove(id); + if (realNewSnapshotEntry instanceof SnapshotEntryObject re) { + assert re.data().version() <= newSnapshotEntry.getKey().version(); + } + _objects = _objects.plus(newSnapshotEntry.getKey(), realNewSnapshotEntry); +// assert val == null; + _snapshotBounds.merge(newSnapshotEntry.getKey().version(), new ArrayDeque<>(List.of(newSnapshotEntry.getKey())), + (a, b) -> { + a.addAll(b); + return a; + }); + } + } + commit.run(); + }); + } finally { + _lock.writeLock().unlock(); + } + +// } + +// verify(); + // Commit under lock, iterators will see new version after the lock is released and writeback + // cache is updated + // TODO: Maybe writeback iterator being invalidated wouldn't be a problem? +// } finally { +// _lock.writeLock().unlock(); +// } + } + + private void unrefSnapshot(long id) { + Log.tracev("Unref snapshot {0}", id); + _lock.writeLock().lock(); + try { + verify(); + var refCount = _snapshotRefCounts.merge(id, -1L, (a, b) -> a + b == 0 ? null : a + b); + if (!(refCount == null && id == _lastAliveSnapshotId)) { + return; + } + + long curCount; + long curId = id; + long nextId; + do { + Log.tracev("Removing snapshot {0}", curId); + _snapshotIds.poll(); + nextId = _snapshotIds.isEmpty() ? -1 : _snapshotIds.peek(); + while (nextId == curId) { + _snapshotIds.poll(); + nextId = _snapshotIds.isEmpty() ? 
-1 : _snapshotIds.peek(); + } + + var keys = _snapshotBounds.headMap(curId, true); + + long finalCurId = curId; + long finalNextId = nextId; + ArrayList> toReAdd = new ArrayList<>(); + keys.values().stream().flatMap(Collection::stream).forEach(key -> { + var entry = _objects.get(key); + if (entry == null) { +// Log.warnv("Entry not found for key {0}", key); + return; + } + if (finalNextId == -1) { + Log.tracev("Could not find place to place entry {0}, curId={1}, nextId={2}, whenToRemove={3}, snapshotIds={4}", + entry, finalCurId, finalNextId, entry.whenToRemove(), _snapshotIds); + } else if (finalNextId < entry.whenToRemove()) { + _objects = _objects.plus(new SnapshotKey(key.key(), finalNextId), entry); + assert finalNextId > finalCurId; + toReAdd.add(Pair.of(finalNextId, new SnapshotKey(key.key(), finalNextId))); + } + _objects = _objects.minus(key); + }); + + toReAdd.forEach(p -> { + _snapshotBounds.merge(p.getLeft(), new ArrayDeque<>(List.of(p.getRight())), + (a, b) -> { + a.addAll(b); + return a; + }); + }); + + keys.clear(); + + if (_snapshotIds.isEmpty()) { + _lastAliveSnapshotId = -1; + break; + } + + curId = _snapshotIds.peek(); + _lastAliveSnapshotId = curId; + + curCount = _snapshotRefCounts.getOrDefault(curId, 0L); + } while (curCount == 0); + verify(); + } finally { + _lock.writeLock().unlock(); + } + } + + public static class IllegalSnapshotIdException extends IllegalArgumentException { + public IllegalSnapshotIdException(String message) { + super(message); + } + + @Override + public synchronized Throwable fillInStackTrace() { + return this; + } + } + + public class Snapshot implements AutoCloseableNoThrow { + private final long _id; + private static final Cleaner CLEANER = Cleaner.create(); + private final MutableObject _closed = new MutableObject<>(false); + + public long id() { + return _id; + } + + private Snapshot(long id) { + _id = id; + _lock.writeLock().lock(); + try { + verify(); + if (_lastSnapshotId > id) + throw new IllegalSnapshotIdException("Snapshot id " + id + " is less than last snapshot id " + _lastSnapshotId); + _lastSnapshotId = id; + if (_lastAliveSnapshotId == -1) + _lastAliveSnapshotId = id; + if (_snapshotRefCounts.merge(id, 1L, Long::sum) == 1) { + _snapshotIds.add(id); + } + verify(); + } finally { + _lock.writeLock().unlock(); + } + var closedRef = _closed; + var idRef = _id; + CLEANER.register(this, () -> { + if (!closedRef.getValue()) { + Log.error("Snapshot " + idRef + " was not closed before GC"); + } + }); + } + + public class CheckingSnapshotKvIterator implements CloseableKvIterator { + private final CloseableKvIterator _backing; + + public CheckingSnapshotKvIterator(CloseableKvIterator backing) { + _backing = backing; + } + + @Override + public JObjectKey peekNextKey() { + return _backing.peekNextKey(); + } + + @Override + public void skip() { + _backing.skip(); + } + + @Override + public JObjectKey peekPrevKey() { + return _backing.peekPrevKey(); + } + + @Override + public Pair prev() { + var ret = _backing.prev(); + assert ret.getValue().version() <= _id; + return ret; + } + + @Override + public boolean hasPrev() { + return _backing.hasPrev(); + } + + @Override + public void skipPrev() { + _backing.skipPrev(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _backing.hasNext(); + } + + @Override + public Pair next() { + var ret = _backing.next(); + assert ret.getValue().version() <= _id; + return ret; + } + } + + public CloseableKvIterator getIterator(IteratorStart start, 
JObjectKey key) { + _lock.readLock().lock(); + try { + Log.tracev("Getting snapshot {0} iterator for {1} {2}\n" + + "objects in snapshots: {3}", _id, start, key, _objects); + return new CheckingSnapshotKvIterator(new TombstoneMergingKvIterator<>("snapshot", start, key, + (tS, tK) -> new SnapshotKvIterator(_objects, _id, tS, tK), + (tS, tK) -> new MappingKvIterator<>( + writebackStore.getIterator(tS, tK), d -> d.version() <= _id ? new Data<>(d) : new Tombstone<>()) + )); + } finally { + _lock.readLock().unlock(); + } + } + + @Nonnull + public Optional readObject(JObjectKey name) { + try (var it = getIterator(IteratorStart.GE, name)) { + if (it.hasNext()) { + if (!it.peekNextKey().equals(name)) { + return Optional.empty(); + } + return Optional.of(it.next().getValue()); + } + } + return Optional.empty(); + } + + @Override + public void close() { + if (_closed.getValue()) { + return; + } + _closed.setValue(true); + unrefSnapshot(_id); + } + } + + public Snapshot createSnapshot() { + _lock.writeLock().lock(); + try { + return new Snapshot(writebackStore.getLastTxId()); + } finally { + _lock.writeLock().unlock(); + } + } + + @Nonnull + public Optional readObjectDirect(JObjectKey name) { + return writebackStore.readObject(name); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java new file mode 100644 index 00000000..1cf28822 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java @@ -0,0 +1,6 @@ +package com.usatiuk.dhfs.objects.transaction; + +public enum LockingStrategy { + OPTIMISTIC, // Optimistic write, no blocking other possible writers/readers + WRITE, // Write lock, blocks all other writers +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java new file mode 100644 index 00000000..e609081b --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java @@ -0,0 +1,138 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.snapshot.SnapshotManager; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +@ApplicationScoped +public class ReadTrackingObjectSourceFactory { + @Inject + LockManager lockManager; + + public ReadTrackingTransactionObjectSource create(SnapshotManager.Snapshot snapshot) { + return new ReadTrackingObjectSourceImpl(snapshot); + } + + public class ReadTrackingObjectSourceImpl implements ReadTrackingTransactionObjectSource { + private final SnapshotManager.Snapshot _snapshot; + + private final Map> _readSet = new HashMap<>(); + + public ReadTrackingObjectSourceImpl(SnapshotManager.Snapshot snapshot) { + _snapshot = snapshot; + } + + public Map> getRead() { + return Collections.unmodifiableMap(_readSet); + } + + @Override + public Optional get(Class type, JObjectKey key) { + var got = _readSet.get(key); + + if (got == null) { + var read = _snapshot.readObject(key); + _readSet.put(key, new 
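Putting the Snapshot API together: snapshots are reference-counted and must be closed explicitly (the Cleaner only reports a leak, it does not release anything). A usage sketch, where snapshotManager, key and process() are assumed:

    try (var snap = snapshotManager.createSnapshot()) {
        var one = snap.readObject(key);                    // point read at snapshot time
        try (var it = snap.getIterator(IteratorStart.GE, key)) {
            while (it.hasNext())
                process(it.next());                        // ordered scan, same snapshot
        }
    } // close() unrefs the snapshot id so old versions can be pruned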
TransactionObjectNoLock<>(read)); + return read.map(JDataVersionedWrapper::data).map(type::cast); + } + + return got.data().map(JDataVersionedWrapper::data).map(type::cast); + } + + @Override + public Optional getWriteLocked(Class type, JObjectKey key) { + var got = _readSet.get(key); + + if (got == null) { + var lock = lockManager.lockObject(key); + try { + var read = _snapshot.readObject(key); + _readSet.put(key, new TransactionObjectLocked<>(read, lock)); + return read.map(JDataVersionedWrapper::data).map(type::cast); + } catch (Exception e) { + lock.close(); + throw e; + } + } + + return got.data().map(JDataVersionedWrapper::data).map(type::cast); + } + + @Override + public void close() { +// for (var it : _iterators) { +// it.close(); +// } + } + + private class ReadTrackingIterator implements CloseableKvIterator { + private final CloseableKvIterator _backing; + + public ReadTrackingIterator(IteratorStart start, JObjectKey key) { + _backing = _snapshot.getIterator(start, key); + } + + @Override + public JObjectKey peekNextKey() { + return _backing.peekNextKey(); + } + + @Override + public void skip() { + _backing.skip(); + } + + @Override + public JObjectKey peekPrevKey() { + return _backing.peekPrevKey(); + } + + @Override + public Pair prev() { + var got = _backing.prev(); + _readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(got.getValue()))); + return Pair.of(got.getKey(), got.getValue().data()); + } + + @Override + public boolean hasPrev() { + return _backing.hasPrev(); + } + + @Override + public void skipPrev() { + _backing.skipPrev(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _backing.hasNext(); + } + + @Override + public Pair next() { + var got = _backing.next(); + _readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(got.getValue()))); + return Pair.of(got.getKey(), got.getValue().data()); + } + } + + @Override + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new ReadTrackingIterator(start, key); + } + } +} \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java new file mode 100644 index 00000000..171ea1d4 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java @@ -0,0 +1,26 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.CloseableKvIterator; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.Iterator; +import java.util.Map; +import java.util.Optional; + +public interface ReadTrackingTransactionObjectSource extends AutoCloseableNoThrow { + Optional get(Class type, JObjectKey key); + + Optional getWriteLocked(Class type, JObjectKey key); + + CloseableKvIterator getIterator(IteratorStart start, JObjectKey key); + + default CloseableKvIterator getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } + + Map> getRead(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java 
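The purpose of recording every read into _readSet is commit-time validation for optimistic transactions; the commit side is not part of this hunk. A sketch of what such a version-based check could look like (speculative, names assumed):

    for (var e : tx.reads().entrySet()) {
        var nowVersion = store.readObject(e.getKey())
                .map(JDataVersionedWrapper::version);
        var seenVersion = e.getValue().data()
                .map(JDataVersionedWrapper::version);
        if (!nowVersion.equals(seenVersion))
            throw new TxCommitException("read conflict on " + e.getKey());
    }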
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java new file mode 100644 index 00000000..3120999b --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -0,0 +1,35 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.CloseableKvIterator; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; + +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.Optional; + +// The transaction interface actually used by user code to retrieve objects +public interface Transaction extends TransactionHandle { + void onCommit(Runnable runnable); + + Optional get(Class type, JObjectKey key, LockingStrategy strategy); + + void put(JData obj); + + void delete(JObjectKey key); + + @Nonnull + Collection findAllObjects(); // FIXME: This is crap + + default Optional get(Class type, JObjectKey key) { + return get(type, key, LockingStrategy.OPTIMISTIC); + } + + CloseableKvIterator getIterator(IteratorStart start, JObjectKey key); + + default CloseableKvIterator getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java new file mode 100644 index 00000000..634daa22 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java @@ -0,0 +1,5 @@ +package com.usatiuk.dhfs.objects.transaction; + +public interface TransactionFactory { + TransactionPrivate createTransaction(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java new file mode 100644 index 00000000..331fb033 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -0,0 +1,145 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.snapshot.SnapshotManager; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import javax.annotation.Nonnull; +import java.util.*; + +@ApplicationScoped +public class TransactionFactoryImpl implements TransactionFactory { + @Inject + SnapshotManager snapshotManager; + @Inject + ReadTrackingObjectSourceFactory readTrackingObjectSourceFactory; + + @Override + public TransactionPrivate createTransaction() { + return new TransactionImpl(); + } + + private class TransactionImpl implements TransactionPrivate { + private final ReadTrackingTransactionObjectSource _source; + + private final NavigableMap> _writes = new TreeMap<>(); + + private Map> _newWrites = new HashMap<>(); + private final List _onCommit = new ArrayList<>(); + private final List _onFlush = new ArrayList<>(); + private final SnapshotManager.Snapshot _snapshot; + + private TransactionImpl() { + _snapshot = snapshotManager.createSnapshot(); + _source = readTrackingObjectSourceFactory.create(_snapshot); + } + + @Override + public void onCommit(Runnable runnable) { + _onCommit.add(runnable); + } + + @Override + public void onFlush(Runnable 
runnable) { + _onFlush.add(runnable); + } + + @Override + public Collection getOnCommit() { + return Collections.unmodifiableCollection(_onCommit); + } + + @Override + public SnapshotManager.Snapshot snapshot() { + return _snapshot; + } + + @Override + public Collection getOnFlush() { + return Collections.unmodifiableCollection(_onFlush); + } + + @Override + public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { + switch (_writes.get(key)) { + case TxRecord.TxObjectRecordWrite write -> { + return Optional.of(type.cast(write.data())); + } + case TxRecord.TxObjectRecordDeleted deleted -> { + return Optional.empty(); + } + case null, default -> { + } + } + + return switch (strategy) { + case OPTIMISTIC -> _source.get(type, key); + case WRITE -> _source.getWriteLocked(type, key); + }; + } + + @Override + public void delete(JObjectKey key) { + var got = _writes.get(key); + if (got != null) { + if (got instanceof TxRecord.TxObjectRecordDeleted) { + return; + } + } + + _writes.put(key, new TxRecord.TxObjectRecordDeleted(key)); + _newWrites.put(key, new TxRecord.TxObjectRecordDeleted(key)); + } + + @Nonnull + @Override + public Collection findAllObjects() { +// return store.findAllObjects(); + return List.of(); + } + + @Override + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + Log.tracev("Getting tx iterator with start={0}, key={1}", start, key); + return new TombstoneMergingKvIterator<>("tx", start, key, + (tS, tK) -> new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, tS, tK), t -> switch (t) { + case TxRecord.TxObjectRecordWrite write -> new Data<>(write.data()); + case TxRecord.TxObjectRecordDeleted deleted -> new Tombstone<>(); + case null, default -> null; + }), + (tS, tK) -> new MappingKvIterator<>(_source.getIterator(tS, tK), Data::new)); + } + + @Override + public void put(JData obj) { + _writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj)); + _newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj)); + } + + @Override + public Collection> drainNewWrites() { + var ret = _newWrites; + _newWrites = new HashMap<>(); + return ret.values(); + } + + @Override + public Map> reads() { + return _source.getRead(); + } + + @Override + public ReadTrackingTransactionObjectSource readSource() { + return _source; + } + + @Override + public void close() { + _source.close(); + _snapshot.close(); + } + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java new file mode 100644 index 00000000..262798c6 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java @@ -0,0 +1,5 @@ +package com.usatiuk.dhfs.objects.transaction; + +public interface TransactionHandle { + void onFlush(Runnable runnable); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandlePrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandlePrivate.java new file mode 100644 index 00000000..cc9c8e7d --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandlePrivate.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.objects.transaction; + +import java.util.Collection; + +public interface TransactionHandlePrivate extends TransactionHandle { + Collection getOnFlush(); +} diff --git 
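For completeness, how user code is expected to drive the Transaction interface above; MyData and the surrounding transaction bootstrap are placeholders, not types from this diff:

    // Inside a running transaction `curTx`:
    var read = curTx.get(MyData.class, key);               // optimistic read
    curTx.get(MyData.class, key, LockingStrategy.WRITE);   // or a write-locked read
    curTx.put(new MyData(key, 42));                        // buffered write
    curTx.delete(otherKey);                                // buffered delete
    curTx.onCommit(() -> Log.info("visible to new transactions"));
    curTx.onFlush(() -> Log.info("durable on disk"));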
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java new file mode 100644 index 00000000..05826900 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JDataVersionedWrapper; + +import java.util.Optional; + +public interface TransactionObject { + Optional data(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java new file mode 100644 index 00000000..766a3a63 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -0,0 +1,21 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.snapshot.SnapshotManager; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; + +import java.util.Collection; +import java.util.Map; + +// The transaction interface actually used by user code to retrieve objects +public interface TransactionPrivate extends Transaction, TransactionHandlePrivate, AutoCloseableNoThrow { + Collection> drainNewWrites(); + + Map> reads(); + + ReadTrackingTransactionObjectSource readSource(); + + Collection getOnCommit(); + + SnapshotManager.Snapshot snapshot(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java new file mode 100644 index 00000000..f5510e6f --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -0,0 +1,20 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +public class TxRecord { + public interface TxObjectRecord { + JObjectKey key(); + } + + public record TxObjectRecordWrite(JData data) implements TxObjectRecord { + @Override + public JObjectKey key() { + return data.key(); + } + } + + public record TxObjectRecordDeleted(JObjectKey key) implements TxObjectRecord { + } +} diff --git a/dhfs-parent/objects/src/main/resources/META-INF/beans.xml b/dhfs-parent/objects/src/main/resources/META-INF/beans.xml new file mode 100644 index 00000000..e69de29b diff --git a/dhfs-parent/objects/src/main/resources/application.properties b/dhfs-parent/objects/src/main/resources/application.properties new file mode 100644 index 00000000..71d81280 --- /dev/null +++ b/dhfs-parent/objects/src/main/resources/application.properties @@ -0,0 +1,8 @@ +dhfs.objects.persistence=lmdb +dhfs.objects.writeback.limit=134217728 +dhfs.objects.lru.limit=134217728 +dhfs.objects.lru.print-stats=true +dhfs.objects.lock_timeout_secs=15 +dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs +quarkus.package.jar.decompiler.enabled=true +dhfs.objects.persistence.snapshot-extra-checks=false \ No newline at end of file diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java new file mode 100644 index 00000000..0c78cd79 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java @@ -0,0 
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java new file mode 100644 index 00000000..0c78cd79 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java @@ -0,0 +1,79 @@ +package com.usatiuk.dhfs.objects; + +import org.junit.jupiter.api.Assertions; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; + +public abstract class Just { + public static void run(Callable<?> callable) { + new Thread(() -> { + try { + callable.call(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }).start(); + } + + public static void runAll(Callable<?>... callables) { + try { + try (var exs = Executors.newFixedThreadPool(callables.length)) { + exs.invokeAll(Arrays.stream(callables).map(c -> (Callable<Object>) () -> { + try { + return c.call(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }).toList()).forEach(f -> { + try { + f.get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static void runAll(Runnable... callables) { + try { + try (var exs = Executors.newFixedThreadPool(callables.length)) { + exs.invokeAll(Arrays.stream(callables).map(c -> (Callable<Object>) () -> { + try { + c.run(); + return null; + } catch (Exception e) { + throw new RuntimeException(e); + } + }).toList()).forEach(f -> { + try { + f.get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static <K> void checkIterator(Iterator<K> it, List<K> expected) { + for (var e : expected) { + Assertions.assertTrue(it.hasNext()); + var next = it.next(); + Assertions.assertEquals(e, next); + } + } + + @SafeVarargs + public static <K> void checkIterator(Iterator<K> it, K... expected) { + checkIterator(it, Arrays.asList(expected)); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/KeyPredicateKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/KeyPredicateKvIteratorTest.java new file mode 100644 index 00000000..055f4f29 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/KeyPredicateKvIteratorTest.java @@ -0,0 +1,154 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.pcollections.TreePMap; + +import java.util.List; + +public class KeyPredicateKvIteratorTest { + + @Test + public void simpleTest() { + var source1 = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.GT, 3), + IteratorStart.GE, 3, v -> (v % 2 == 0)); + var expected = List.of(Pair.of(6, 6)); + for (var pair : expected) { + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(pair, pit.next()); + } + } + + @Test + public void ltTest() { + var source1 = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + var expected = List.of(Pair.of(6, 6)); + for (var pair : expected) { + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(pair, pit.next()); + } + Assertions.assertFalse(pit.hasNext()); + } + + @Test + public void ltTest2() { + var source1 = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 3), + IteratorStart.LT, 2, v -> (v % 2
== 0)); + Just.checkIterator(pit, Pair.of(6, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 5), + IteratorStart.LE, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6)); + Assertions.assertFalse(pit.hasNext()); + } + + @Test + public void ltTest3() { + var source1 = TreePMap.empty().plus(3, 3).plus(5, 5).plus(6, 6).plus(7, 7).plus(8, 8); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7), + IteratorStart.LT, 7, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 8), + IteratorStart.LT, 8, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 6), + IteratorStart.LE, 6, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0)); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(Pair.of(6, 6), pit.next()); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(8, pit.peekNextKey()); + Assertions.assertEquals(6, pit.peekPrevKey()); + Assertions.assertEquals(8, pit.peekNextKey()); + Assertions.assertEquals(6, pit.peekPrevKey()); + } + + @Test + public void itTest4() { + var source1 = TreePMap.empty().plus(3, 3).plus(5, 5).plus(6, 6).plus(8, 8).plus(10, 10); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, 
Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7), + IteratorStart.LT, 7, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0)); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(Pair.of(6, 6), pit.next()); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(8, pit.peekNextKey()); + Assertions.assertEquals(6, pit.peekPrevKey()); + Assertions.assertEquals(8, pit.peekNextKey()); + Assertions.assertEquals(6, pit.peekPrevKey()); + } + +// @Test +// public void reverseTest() { +// var source1 = TreePMap.empty().plus(3, 3).plus(5, 5).plus(6, 6); +// var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), +// IteratorStart.LT, 4, v -> (v % 2 == 0) ); +// +// } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java new file mode 100644 index 00000000..430dc635 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java @@ -0,0 +1,348 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.pcollections.TreePMap; + +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; + +public class MergingKvIteratorTest { + + private class SimpleIteratorWrapper<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> { + private final Iterator<Pair<K, V>> _iterator; + private Pair<K, V> _next; + + public SimpleIteratorWrapper(Iterator<Pair<K, V>> iterator) { + _iterator = iterator; + fillNext(); + } + + private void fillNext() { + while (_iterator.hasNext() && _next == null) { + _next = _iterator.next(); + } + } + + @Override + public K peekNextKey() { + if (_next == null) { + throw new NoSuchElementException(); + } + return _next.getKey(); + } + + @Override + public void skip() { + if (_next == null) { + throw new NoSuchElementException(); + } + _next = null; + fillNext(); + } + + @Override + public K peekPrevKey() { + throw new UnsupportedOperationException(); + } + + @Override + public Pair<K, V> prev() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasPrev() { + throw new UnsupportedOperationException(); + } + + @Override + public void skipPrev() { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + } + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public Pair<K, V> next() { + if (_next == null) { + throw new NoSuchElementException("No more elements"); + } + var ret = _next; + _next = null; + fillNext(); + return ret; + } + }
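[Reviewer note: not part of the patch] SimpleIteratorWrapper above is a deliberately minimal, forward-only test double: it implements just enough of CloseableKvIterator (hasNext/next/peekNextKey/skip) to drive the merge logic, and throws UnsupportedOperationException for the reverse direction. The tests that follow pin down the tie-breaking rule: on duplicate keys, the source listed first wins and later duplicates are skipped, e.g.:

    // source1 = {1: 2, 2: 4, 5: 6}, source2 = {1: 3, 2: 5, 5: 7}
    // merge(source1, source2) yields (1, 2), (2, 4), (5, 6)   (source1 wins ties)
    // merge(source2, source1) yields (1, 3), (2, 5), (5, 7)   (source2 wins ties)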
+ + @Test + public void testTestIterator() { + var list = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6)); + var iterator = new SimpleIteratorWrapper<>(list.iterator()); + var realIterator = list.iterator(); + while (realIterator.hasNext()) { + Assertions.assertTrue(iterator.hasNext()); + Assertions.assertEquals(realIterator.next(), iterator.next()); + } + Assertions.assertFalse(iterator.hasNext()); + + var emptyList = List.<Pair<Integer, Integer>>of(); + var emptyIterator = new SimpleIteratorWrapper<>(emptyList.iterator()); + Assertions.assertFalse(emptyIterator.hasNext()); + } + + @Test + public void testSimple() { + var source1 = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6)).iterator(); + var source2 = List.of(Pair.of(2, 3), Pair.of(4, 5), Pair.of(6, 7)).iterator(); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1), (a, b) -> new SimpleIteratorWrapper<>(source2)); + var expected = List.of(Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4), Pair.of(4, 5), Pair.of(5, 6), Pair.of(6, 7)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + } + + @Test + public void testPriority() { + var source1 = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6)); + var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7)); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator())); + var expected = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source1.iterator())); + var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriority2() { + var source1 = List.of(Pair.of(2, 4), Pair.of(5, 6)); + var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5)); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator())); + var expected = List.of(Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source1.iterator())); + var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLe() { + var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6); + var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5); + var
mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + Just.checkIterator(mergingIterator.reversed(), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3)); + Assertions.assertFalse(mergingIterator.reversed().hasNext()); + Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6)); + Assertions.assertFalse(mergingIterator.hasNext()); + + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(5, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + Just.checkIterator(mergingIterator2.reversed(), Pair.of(5, 6), Pair.of(2, 5), Pair.of(1, 3)); + Assertions.assertFalse(mergingIterator2.reversed().hasNext()); + Just.checkIterator(mergingIterator2, Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6)); + Assertions.assertFalse(mergingIterator2.hasNext()); + + var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); + Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); + Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); + } + + @Test + public void testPriorityLe2() { + var source1 = TreePMap.empty().plus(2, 4).plus(5, 6); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(3, 4); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + } + + @Test + public void testPriorityLe3() { + var source1 = TreePMap.empty().plus(2, 4).plus(5, 6); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(6, 8); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(5, 6), Pair.of(6, 8)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + Just.checkIterator(mergingIterator.reversed(), Pair.of(6, 8), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3)); + Assertions.assertFalse(mergingIterator.reversed().hasNext()); + Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6), Pair.of(6, 8)); + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> 
new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(5, 6), Pair.of(6, 8)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + + var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); + Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); + Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); + Assertions.assertTrue(mergingIterator3.hasPrev()); + Assertions.assertTrue(mergingIterator3.hasNext()); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); + } + + @Test + public void testPriorityLe4() { + var source1 = TreePMap.empty().plus(6, 7); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(3, 4); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(3, 4), Pair.of(6, 7)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLe5() { + var source1 = TreePMap.empty().plus(1, 2).plus(6, 7); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(3, 4); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(3, 4), Pair.of(6, 7)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLe6() { + var source1 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(3, 4); + var source2 = TreePMap.empty().plus(4, 6); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(4, 6)); + for (var pair : expected) { + 
Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(4, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLe7() { + var source1 = TreePMap.empty().plus(1, 3).plus(3, 5).plus(4, 6); + var source2 = TreePMap.empty().plus(1, 4).plus(3, 5).plus(4, 6); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + Just.checkIterator(mergingIterator.reversed(), Pair.of(4, 6), Pair.of(3, 5), Pair.of(1, 3)); + Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6)); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(1, 4), Pair.of(3, 5), Pair.of(4, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLt() { + var source1 = TreePMap.empty().plus(2, 4).plus(5, 6); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(2, 4), Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(2, 5), Pair.of(5, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/NavigableMapKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/NavigableMapKvIteratorTest.java new file mode 100644 index 00000000..de7a5666 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/NavigableMapKvIteratorTest.java @@ -0,0 +1,71 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import 
org.junit.jupiter.api.Test; +import org.pcollections.TreePMap; + +import java.util.NavigableMap; + +public class NavigableMapKvIteratorTest { + private final NavigableMap<Integer, Integer> _testMap1 = TreePMap.<Integer, Integer>empty().plus(1, 2).plus(2, 3).plus(3, 4); + + @Test + void test1() { + var iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 3); + Just.checkIterator(iterator, Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 2); + Just.checkIterator(iterator, Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GE, 2); + Just.checkIterator(iterator, Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GT, 2); + Just.checkIterator(iterator, Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LT, 3); + Just.checkIterator(iterator, Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LT, 2); + Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LT, 1); + Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 1); + Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GT, 3); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GT, 4); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 0); + Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GE, 2); + Assertions.assertTrue(iterator.hasNext()); + Assertions.assertEquals(2, iterator.peekNextKey()); + Assertions.assertEquals(1, iterator.peekPrevKey()); + Assertions.assertEquals(2, iterator.peekNextKey()); + Assertions.assertEquals(1, iterator.peekPrevKey()); + Just.checkIterator(iterator.reversed(), Pair.of(1, 2)); + Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertEquals(Pair.of(3, 4), iterator.prev()); + Assertions.assertEquals(Pair.of(2, 3), iterator.prev()); + Assertions.assertEquals(Pair.of(2, 3), iterator.next()); + } + +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java new file mode 100644 index 00000000..a4bdfb57 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.ObjectsTestProfileExtraChecks.class) +public class ObjectsTestExtraChecks extends ObjectsTestImpl { +}
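[Reviewer note: not part of the patch] ObjectsTestExtraChecks and its NoExtraChecks twin differ only in their Quarkus test profile; the Profiles classes in ObjectsTestImpl.java below flip dhfs.objects.persistence.snapshot-extra-checks. TempDataProfile itself is outside this diff; a sketch of the contract these subclasses appear to assume, based on the standard QuarkusTestProfile API:

    public class TempDataProfile implements io.quarkus.test.junit.QuarkusTestProfile {
        // Hook for subclasses to add or override config keys.
        protected void getConfigOverrides(java.util.Map<String, String> toPut) {}

        @Override
        public java.util.Map<String, String> getConfigOverrides() {
            var ret = new java.util.HashMap<String, String>();
            // Presumably also points dhfs.objects.persistence.files.root at a fresh temp dir.
            getConfigOverrides(ret);
            return ret;
        }
    }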
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java new file mode 100644 index 00000000..32a9ea31 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java @@ -0,0 +1,941 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.data.Parent; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.transaction.LockingStrategy; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.quarkus.logging.Log; +import jakarta.inject.Inject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.RepeatedTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; + +class Profiles { + public static class ObjectsTestProfileExtraChecks extends TempDataProfile { + @Override + protected void getConfigOverrides(Map<String, String> toPut) { + toPut.put("dhfs.objects.persistence.snapshot-extra-checks", "true"); + } + } + + public static class ObjectsTestProfileNoExtraChecks extends TempDataProfile { + @Override + protected void getConfigOverrides(Map<String, String> toPut) { + toPut.put("dhfs.objects.persistence.snapshot-extra-checks", "false"); + } + } +} + +public abstract class ObjectsTestImpl { + @Inject + TransactionManager txm; + + @Inject + Transaction curTx; + + private void deleteAndCheck(JObjectKey key) { + txm.run(() -> { + curTx.delete(key); + }); + + txm.run(() -> { + var parent = curTx.get(JData.class, key).orElse(null); + Assertions.assertNull(parent); + }); + } + + @Test + void createObject() { + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentCreate"), "John"); + curTx.put(newParent); + }); + + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate")).orElse(null); + Assertions.assertEquals("John", parent.name()); + }); + } + + @Test + void createGetObject() { + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentCreateGet"), "John"); + curTx.put(newParent); + var parent = curTx.get(Parent.class, JObjectKey.of("ParentCreateGet")).orElse(null); + Assertions.assertEquals("John", parent.name()); + }); + + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreateGet")).orElse(null); + Assertions.assertEquals("John", parent.name()); + }); + } + + @RepeatedTest(100) + void createDeleteObject() { + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentCreateDeleteObject"), "John"); + curTx.put(newParent); + }); + + txm.run(() -> { + var parent = curTx.get(Parent.class, JObjectKey.of("ParentCreateDeleteObject")).orElse(null); + Assertions.assertEquals("John", parent.name()); + }); + + txm.run(() -> { + curTx.delete(new JObjectKey("ParentCreateDeleteObject")); + }); + + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreateDeleteObject")).orElse(null); + Assertions.assertNull(parent); + }); + } + + @Test + void createCreateObject() { + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("Parent7"), "John"); + curTx.put(newParent); + }); + txm.run(() -> { + var newParent =
new Parent(JObjectKey.of("Parent7"), "John2"); + curTx.put(newParent); + }); + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("Parent7")).orElse(null); + Assertions.assertEquals("John2", parent.name()); + }); + } + + @Test + void editObject() { + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("Parent3"), "John"); + curTx.put(newParent); + }); + + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.OPTIMISTIC).orElse(null); + Assertions.assertEquals("John", parent.name()); + curTx.put(parent.withName("John2")); + }); + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.WRITE).orElse(null); + Assertions.assertEquals("John2", parent.name()); + curTx.put(parent.withName("John3")); + }); + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("Parent3")).orElse(null); + Assertions.assertEquals("John3", parent.name()); + }); + } + + @Test + @Disabled + void createObjectConflict() { + AtomicBoolean thread1Failed = new AtomicBoolean(true); + AtomicBoolean thread2Failed = new AtomicBoolean(true); + + var barrier = new CyclicBarrier(2); + var latch = new CountDownLatch(2); + + Just.run(() -> { + try { + Log.warn("Thread 1"); + txm.runTries(() -> { + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); + var newParent = new Parent(JObjectKey.of("Parent2"), "John"); + curTx.put(newParent); + Log.warn("Thread 1 commit"); + }, 0); + thread1Failed.set(false); + return null; + } finally { + latch.countDown(); + } + }); + Just.run(() -> { + try { + Log.warn("Thread 2"); + txm.runTries(() -> { + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); + var newParent = new Parent(JObjectKey.of("Parent2"), "John2"); + curTx.put(newParent); + Log.warn("Thread 2 commit"); + }, 0); + thread2Failed.set(false); + return null; + } finally { + latch.countDown(); + } + }); + + try { + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + var got = txm.run(() -> { + return curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); + }); + + if (!thread1Failed.get()) { + Assertions.assertTrue(thread2Failed.get()); + Assertions.assertEquals("John", got.name()); + } else if (!thread2Failed.get()) { + Assertions.assertEquals("John2", got.name()); + } else { + Assertions.fail("No thread succeeded"); + } + } + + @ParameterizedTest + @EnumSource(LockingStrategy.class) + void editConflict(LockingStrategy strategy) { + String key = "Parent4" + strategy.name(); + txm.run(() -> { + var newParent = new Parent(JObjectKey.of(key), "John3"); + curTx.put(newParent); + }); + + AtomicBoolean thread1Failed = new AtomicBoolean(true); + AtomicBoolean thread2Failed = new AtomicBoolean(true); + + var barrier = new CyclicBarrier(2); + var latchEnd = new CountDownLatch(2); + + Just.run(() -> { + try { + Log.warn("Thread 1"); + txm.runTries(() -> { + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); + curTx.put(parent.withName("John")); + Log.warn("Thread 1 commit"); + }, 0); + Log.warn("Thread 1 commit done"); + thread1Failed.set(false); + return null; + } finally { + latchEnd.countDown(); + } + }); + Just.run(() -> { + 
try { + Log.warn("Thread 2"); + barrier.await(); // Ensure thread 2 tx id is larger than thread 1 + txm.runTries(() -> { + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); + curTx.put(parent.withName("John2")); + Log.warn("Thread 2 commit"); + }, 0); + Log.warn("Thread 2 commit done"); + thread2Failed.set(false); + return null; + } finally { + latchEnd.countDown(); + } + }); + + try { + latchEnd.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + var got = txm.run(() -> { + return curTx.get(Parent.class, new JObjectKey(key)).orElse(null); + }); + + if (!thread1Failed.get() && !thread2Failed.get()) { + Assertions.assertTrue(got.name().equals("John") || got.name().equals("John2")); + return; + } + + Assertions.assertFalse(!thread1Failed.get() && !thread2Failed.get()); + + if (!thread1Failed.get()) { + if (!thread2Failed.get()) { + Assertions.assertEquals("John2", got.name()); + } else { + Assertions.assertEquals("John", got.name()); + } + } else { + Assertions.assertFalse(thread2Failed.get()); + Assertions.assertEquals("John2", got.name()); + } + } + + @ParameterizedTest + @EnumSource(LockingStrategy.class) + void editConflict2(LockingStrategy strategy) { + String key = "EditConflict2" + strategy.name(); + txm.run(() -> { + var newParent = new Parent(JObjectKey.of(key), "John3"); + curTx.put(newParent); + }); + + AtomicBoolean thread1Failed = new AtomicBoolean(true); + AtomicBoolean thread2Failed = new AtomicBoolean(true); + + var barrier = new CyclicBarrier(2); + var latchEnd = new CountDownLatch(2); + + Just.run(() -> { + try { + Log.warn("Thread 1"); + txm.runTries(() -> { + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); + curTx.put(parent.withName("John")); + Log.warn("Thread 1 commit"); + }, 0); + Log.warn("Thread 1 commit done"); + thread1Failed.set(false); + return null; + } finally { + latchEnd.countDown(); + } + }); + Just.run(() -> { + try { + Log.warn("Thread 2"); + txm.runTries(() -> { + // Ensure they will conflict + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); + curTx.put(parent.withName("John2")); + Log.warn("Thread 2 commit"); + }, 0); + Log.warn("Thread 2 commit done"); + thread2Failed.set(false); + return null; + } finally { + latchEnd.countDown(); + } + }); + + try { + latchEnd.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + var got = txm.run(() -> { + return curTx.get(Parent.class, new JObjectKey(key)).orElse(null); + }); + + Assertions.assertFalse(!thread1Failed.get() && !thread2Failed.get()); + + if (!thread1Failed.get()) { + if (!thread2Failed.get()) { + Assertions.assertEquals("John2", got.name()); + } else { + Assertions.assertEquals("John", got.name()); + } + } else { + Assertions.assertFalse(thread2Failed.get()); + Assertions.assertEquals("John2", got.name()); + } + } + + @RepeatedTest(100) + void snapshotTest1() { + var key = "SnapshotTest1"; + var barrier1 = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + try (ExecutorService ex = Executors.newFixedThreadPool(3)) { + ex.invokeAll(List.of( + () -> { + barrier1.await(); + Log.info("Thread 2 starting tx"); + txm.run(() -> { + Log.info("Thread 2 started tx"); + curTx.put(new Parent(JObjectKey.of(key), "John")); + Log.info("Thread 2 committing"); + 
}); + Log.info("Thread 2 commited"); + try { + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + return null; + }, + () -> { + Log.info("Thread 1 starting tx"); + txm.run(() -> { + try { + Log.info("Thread 1 started tx"); + barrier1.await(); + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + Log.info("Thread 1 reading"); + Assertions.assertTrue(curTx.get(Parent.class, new JObjectKey(key)).isEmpty()); + Log.info("Thread 1 done reading"); + }); + Log.info("Thread 1 finished"); + return null; + } + )).forEach(f -> { + try { + f.get(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + }); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + txm.run(() -> { + Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name()); + }); + deleteAndCheck(new JObjectKey(key)); + } + + @RepeatedTest(100) + void snapshotTest2() { + var key = "SnapshotTest2"; + var barrier1 = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + }); + try (ExecutorService ex = Executors.newFixedThreadPool(3)) { + ex.invokeAll(List.of( + () -> { + barrier1.await(); + Log.info("Thread 2 starting tx"); + txm.run(() -> { + Log.info("Thread 2 started tx"); + curTx.put(new Parent(JObjectKey.of(key), "John2")); + Log.info("Thread 2 committing"); + }); + Log.info("Thread 2 commited"); + try { + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + return null; + }, + () -> { + Log.info("Thread 1 starting tx"); + txm.run(() -> { + try { + Log.info("Thread 1 started tx"); + barrier1.await(); + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + Log.info("Thread 1 reading"); + Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name()); + Log.info("Thread 1 done reading"); + }); + Log.info("Thread 1 finished"); + return null; + } + )).forEach(f -> { + try { + f.get(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + }); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + txm.run(() -> { + Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name()); + }); + deleteAndCheck(new JObjectKey(key)); + } + + @RepeatedTest(100) + void snapshotTest3() { + var key = "SnapshotTest3"; + var barrier0 = new CountDownLatch(1); + var barrier1 = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + }).onFlush(barrier0::countDown); + try { + barrier0.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + try (ExecutorService ex = Executors.newFixedThreadPool(3)) { + ex.invokeAll(List.of( + () -> { + barrier1.await(); + Log.info("Thread 2 starting tx"); + txm.run(() -> { + Log.info("Thread 2 started tx"); + curTx.put(new Parent(JObjectKey.of(key), "John2")); + Log.info("Thread 2 committing"); + }); + Log.info("Thread 2 commited"); + try { + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + return null; + }, + () -> { + Log.info("Thread 1 starting tx"); + txm.run(() -> { + try { + Log.info("Thread 1 started tx"); + barrier1.await(); + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + Log.info("Thread 1 reading"); + Assertions.assertEquals("John", curTx.get(Parent.class, new 
JObjectKey(key)).orElseThrow().name()); + Log.info("Thread 1 done reading"); + }); + Log.info("Thread 1 finished"); + return null; + } + )).forEach(f -> { + try { + f.get(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + }); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + txm.run(() -> { + Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name()); + }); + deleteAndCheck(new JObjectKey(key)); + } + + @RepeatedTest(100) + void simpleIterator1() { + var key = "SimpleIterator1"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key2), "John2")); + curTx.put(new Parent(JObjectKey.of(key3), "John3")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + iter.close(); + }); + } + + @RepeatedTest(100) + void simpleIterator2() { + var key = "SimpleIterator2"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key2), "John2")); + curTx.put(new Parent(JObjectKey.of(key3), "John3")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } + }); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.LT, new JObjectKey(key + "_5"))) { + var got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.prev(); + Assertions.assertEquals(key4, got.getKey().name()); + Assertions.assertTrue(iter.hasNext()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } + }); + txm.run(() -> { + curTx.delete(new JObjectKey(key)); + curTx.delete(new JObjectKey(key1)); + curTx.delete(new JObjectKey(key2)); + curTx.delete(new JObjectKey(key3)); + curTx.delete(new JObjectKey(key4)); + }); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + } + }); + } + + @RepeatedTest(100) + void concurrentIterator1() { + var key = "ConcurrentIterator1"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + var barrier = new CyclicBarrier(2); + var barrier2 = new 
CyclicBarrier(2); + Just.runAll(() -> { + barrier.await(); + txm.run(() -> { + Log.info("Thread 1 starting tx"); + try { + barrier2.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + curTx.put(new Parent(JObjectKey.of(key2), "John2")); + curTx.put(new Parent(JObjectKey.of(key3), "John3")); + Log.info("Thread 1 committing"); + }); + Log.info("Thread 1 commited"); + return null; + }, () -> { + txm.run(() -> { + Log.info("Thread 2 starting tx"); + try { + barrier.await(); + barrier2.await(); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + Log.info("Thread 2 finished"); + return null; + }); + Log.info("All threads finished"); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } + }); + txm.run(() -> { + curTx.delete(new JObjectKey(key)); + curTx.delete(new JObjectKey(key1)); + curTx.delete(new JObjectKey(key2)); + curTx.delete(new JObjectKey(key3)); + curTx.delete(new JObjectKey(key4)); + }); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + } + }); + } + + @RepeatedTest(100) + void concurrentIterator2() { + var key = "ConcurrentIterator2"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key2), "John2")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + var barrier = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + Just.runAll(() -> { + barrier.await(); + txm.run(() -> { + Log.info("Thread 1 starting tx"); + try { + barrier2.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + curTx.put(new Parent(JObjectKey.of(key2), "John5")); + curTx.put(new Parent(JObjectKey.of(key3), "John3")); + Log.info("Thread 1 committing"); + }); + Log.info("Thread 1 commited"); + return null; + }, () -> { + txm.run(() -> { + Log.info("Thread 2 starting tx"); + try { + barrier.await(); + barrier2.await(); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + Log.info("Thread 2 finished"); + return null; + }); + Log.info("All threads finished"); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + 
Assertions.assertEquals("John5", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } + }); + txm.run(() -> { + curTx.delete(new JObjectKey(key)); + curTx.delete(new JObjectKey(key1)); + curTx.delete(new JObjectKey(key2)); + curTx.delete(new JObjectKey(key3)); + curTx.delete(new JObjectKey(key4)); + }); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + } + }); + } + + @RepeatedTest(100) + void concurrentIterator3() { + var key = "ConcurrentIterator3"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key2), "John2")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + var barrier = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + Just.runAll(() -> { + barrier.await(); + txm.run(() -> { + Log.info("Thread 1 starting tx"); + try { + barrier2.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + curTx.put(new Parent(JObjectKey.of(key3), "John3")); + curTx.delete(new JObjectKey(key2)); + Log.info("Thread 1 committing"); + }); + Log.info("Thread 1 commited"); + return null; + }, () -> { + txm.run(() -> { + Log.info("Thread 2 starting tx"); + try { + barrier.await(); + barrier2.await(); + try (var iter = curTx.getIterator(IteratorStart.LE, new JObjectKey(key3))) { + var got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + Assertions.assertTrue(iter.hasNext()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.prev(); + Assertions.assertEquals(key4, got.getKey().name()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.prev(); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.prev(); + Assertions.assertEquals(key1, got.getKey().name()); + Assertions.assertTrue(iter.hasNext()); + got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + Log.info("Thread 2 finished"); + return null; + }); + Log.info("All threads finished"); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + 
Assertions.assertEquals(key4, got.getKey().name()); + } + }); + txm.run(() -> { + curTx.delete(new JObjectKey(key)); + curTx.delete(new JObjectKey(key1)); + curTx.delete(new JObjectKey(key3)); + curTx.delete(new JObjectKey(key4)); + }); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + } + }); + } + + @RepeatedTest(100) + void allParallel() { + Just.runAll( + () -> createObject(), + () -> createGetObject(), + () -> createDeleteObject(), + () -> createCreateObject(), + () -> editConflict(LockingStrategy.WRITE), + () -> editConflict(LockingStrategy.OPTIMISTIC), + () -> editConflict2(LockingStrategy.WRITE), + () -> editConflict2(LockingStrategy.OPTIMISTIC), + () -> snapshotTest1(), + () -> snapshotTest2(), + () -> snapshotTest3(), + () -> simpleIterator1(), + () -> simpleIterator2(), + () -> concurrentIterator1(), + () -> concurrentIterator2(), + () -> concurrentIterator3() + ); + } + +// } +// +// @Test +// void nestedCreate() { +// { +// var tx = _tx.beginTransaction(); +// var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); +// var kid = tx.getObject(new JObjectKey("Kid"), Kid.class); +// parent.setName("John"); +// kid.setName("KidName"); +// parent.setKidKey(kid.getKey()); +// tx.commit(); +// } +// +// { +// var tx2 = _tx.beginTransaction(); +// var parent = tx2.getObject(new JObjectKey("Parent")); +// Assertions.assertInstanceOf(Parent.class, parent); +// Assertions.assertEquals("John", ((Parent) parent).getName()); +// Assertions.assertEquals("KidName", ((Parent) parent).getKid().getName()); +// } +// } + +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestNoExtraChecks.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestNoExtraChecks.java new file mode 100644 index 00000000..661a9e4d --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestNoExtraChecks.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.ObjectsTestProfileNoExtraChecks.class) +public class ObjectsTestNoExtraChecks extends ObjectsTestImpl { +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java new file mode 100644 index 00000000..1bae7b0a --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java @@ -0,0 +1,115 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.data.Parent; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import io.quarkus.test.junit.mockito.InjectSpy; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +@QuarkusTest +@TestProfile(TempDataProfile.class) +public class PreCommitTxHookTest { + @Inject + TransactionManager txm; + + @Inject + Transaction curTx; + @InjectSpy + private DummyPreCommitTxHook spyHook; + + @Test + void createObject() { + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentCreate2"), "John"); + curTx.put(newParent); + }); + + 
txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate2")).orElse(null); + Assertions.assertEquals("John", parent.name()); + }); + + ArgumentCaptor<JData> dataCaptor = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); + Mockito.verify(spyHook, Mockito.times(1)).onCreate(keyCaptor.capture(), dataCaptor.capture()); + Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).name()); + Assertions.assertEquals(new JObjectKey("ParentCreate2"), keyCaptor.getValue()); + } + + @Test + void deleteObject() { + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentDel"), "John"); + curTx.put(newParent); + }); + + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentDel")).orElse(null); + Assertions.assertEquals("John", parent.name()); + }); + + txm.run(() -> { + curTx.delete(new JObjectKey("ParentDel")); + }); + + ArgumentCaptor<JData> dataCaptor = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); + Mockito.verify(spyHook, Mockito.times(1)).onDelete(keyCaptor.capture(), dataCaptor.capture()); + Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).name()); + Assertions.assertEquals(new JObjectKey("ParentDel"), keyCaptor.getValue()); + } + + @Test + void editObject() { + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentEdit"), "John"); + curTx.put(newParent); + }); + + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentEdit"), "John changed"); + curTx.put(newParent); + }); + + ArgumentCaptor<JData> dataCaptorOld = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor<JData> dataCaptorNew = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); + Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture()); + Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).name()); + Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).name()); + Assertions.assertEquals(new JObjectKey("ParentEdit"), keyCaptor.getValue()); + } + + @Test + void editObjectWithGet() { + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentEdit2"), "John"); + curTx.put(newParent); + }); + + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentEdit2")).orElse(null); + Assertions.assertEquals("John", parent.name()); + curTx.put(parent.withName("John changed")); + }); + + ArgumentCaptor<JData> dataCaptorOld = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor<JData> dataCaptorNew = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); + Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture()); + Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).name()); + Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).name()); + Assertions.assertEquals(new JObjectKey("ParentEdit2"), keyCaptor.getValue()); + } + + @ApplicationScoped + public static class DummyPreCommitTxHook implements PreCommitTxHook { + } + +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java new file mode 100644 index 00000000..05ad6d4b --- /dev/null +++
b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java @@ -0,0 +1,161 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.pcollections.TreePMap; + +import java.util.List; + +public class PredicateKvIteratorTest { + + @Test + public void simpleTest() { + var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.GT, 1), + IteratorStart.GE, 1, v -> (v % 2 == 0) ? v : null); + var expected = List.of(Pair.of(4, 6)); + for (var pair : expected) { + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(pair, pit.next()); + } + } + + @Test + public void ltTest() { + var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); + var expected = List.of(Pair.of(4, 6)); + for (var pair : expected) { + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(pair, pit.next()); + } + Assertions.assertFalse(pit.hasNext()); + } + + @Test + public void ltTest2() { + var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 1), + IteratorStart.LT, 1, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 2), + IteratorStart.LT, 2, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 4), + IteratorStart.LE, 4, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6)); + Assertions.assertFalse(pit.hasNext()); + } + + @Test + public void ltTest3() { + var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 7).plus(6, 8); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7), + IteratorStart.LT, 7, v -> (v % 2 == 0) ? 
v : null); + Just.checkIterator(pit, Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 8), + IteratorStart.LT, 8, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 6), + IteratorStart.LE, 6, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(4, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(4, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(Pair.of(4, 6), pit.next()); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(4, pit.peekPrevKey()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(4, pit.peekPrevKey()); + } + + @Test + public void itTest4() { + var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 8).plus(6, 10); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(5, 8), Pair.of(6, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7), + IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(6, 10)); + Assertions.assertFalse(pit.hasNext()); + Assertions.assertTrue(pit.hasPrev()); + Assertions.assertEquals(6, pit.peekPrevKey()); + Assertions.assertEquals(Pair.of(6, 10), pit.prev()); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0) ? 
v : null); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(5, pit.peekNextKey()); + Assertions.assertTrue(pit.hasPrev()); + Assertions.assertEquals(4, pit.peekPrevKey()); + Assertions.assertEquals(5, pit.peekNextKey()); + Assertions.assertEquals(4, pit.peekPrevKey()); + Assertions.assertEquals(Pair.of(5, 8), pit.next()); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(5, pit.peekPrevKey()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(5, pit.peekPrevKey()); + } + +// @Test +// public void reverseTest() { +// var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6); +// var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), +// IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); +// +// } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TempDataProfile.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TempDataProfile.java new file mode 100644 index 00000000..16c37789 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TempDataProfile.java @@ -0,0 +1,30 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.test.junit.QuarkusTestProfile; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; + +public class TempDataProfile implements QuarkusTestProfile { + protected void getConfigOverrides(Map<String, String> toPut) { + } + + @Override + final public Map<String, String> getConfigOverrides() { + Path tempDirWithPrefix; + try { + tempDirWithPrefix = Files.createTempDirectory("dhfs-test"); + } catch (IOException e) { + throw new RuntimeException(e); + } + var ret = new HashMap<String, String>(); + ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString()); + ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString()); + ret.put("dhfs.objects.persistence", "lmdb"); + getConfigOverrides(ret); + return ret; + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TestDataCleaner.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TestDataCleaner.java new file mode 100644 index 00000000..957373ec --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TestDataCleaner.java @@ -0,0 +1,40 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.Objects; + +@ApplicationScoped +public class TestDataCleaner { + @ConfigProperty(name = "dhfs.objects.persistence.files.root") + String tempDirectory; + + void init(@Observes @Priority(1) StartupEvent event) throws IOException { + try { + purgeDirectory(Path.of(tempDirectory).toFile()); + } catch (Exception ignored) { + Log.warn("Couldn't cleanup test data on init"); + } + } + + void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException { + purgeDirectory(Path.of(tempDirectory).toFile()); + } + + void purgeDirectory(File dir) { + for (File file : Objects.requireNonNull(dir.listFiles())) { + if (file.isDirectory()) + purgeDirectory(file); + file.delete(); 
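+ // File.delete() signals failure by returning false rather than throwing; stray + // files are tolerable here since TempDataProfile points each run at a fresh temp dir.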
+ } + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java new file mode 100644 index 00000000..b49d163f --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects.data; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +public record Kid(JObjectKey key, String name) implements JData { + public Kid withName(String name) { + return new Kid(key, name); + } +} \ No newline at end of file diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java new file mode 100644 index 00000000..c6dcbbb0 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects.data; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +public record Parent(JObjectKey key, String name) implements JData { + public Parent withName(String name) { + return new Parent(key, name); + } +} \ No newline at end of file diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java new file mode 100644 index 00000000..483297ef --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java @@ -0,0 +1,113 @@ +package com.usatiuk.dhfs.objects.persistence; + + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.Just; +import com.usatiuk.dhfs.objects.TempDataProfile; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.RepeatedTest; + +import java.util.List; + +class Profiles { + public static class LmdbKvIteratorTestProfile extends TempDataProfile { + } +} + +@QuarkusTest +@TestProfile(Profiles.LmdbKvIteratorTestProfile.class) +public class LmdbKvIteratorTest { + + @Inject + LmdbObjectPersistentStore store; + + @RepeatedTest(100) + public void iteratorTest1() { + store.commitTx( + new TxManifestRaw( + List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), + Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), + Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))), + List.of() + ), -1, Runnable::run + ); + + var iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(3))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(2))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), 
ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(2))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(3))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(2))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(1))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(1))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(3))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(4))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(0))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2))); + Assertions.assertTrue(iterator.hasNext()); + Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey()); + Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey()); + Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey()); + Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey()); + Just.checkIterator(iterator.reversed(), Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2}))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), 
Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})), iterator.prev()); + Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.prev()); + Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.next()); + iterator.close(); + + store.commitTx(new TxManifestRaw( + List.of(), + List.of(JObjectKey.of(Long.toString(1)), JObjectKey.of(Long.toString(2)), JObjectKey.of(Long.toString(3))) + ), + -1, Runnable::run + ); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIteratorTest.java new file mode 100644 index 00000000..e4527110 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIteratorTest.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects.snapshot; + +import com.usatiuk.dhfs.objects.JObjectKey; +import org.junit.jupiter.api.Test; + +import java.util.Map; + +public class SnapshotKvIteratorTest { + + +} diff --git a/dhfs-parent/objects/src/test/resources/application.properties b/dhfs-parent/objects/src/test/resources/application.properties new file mode 100644 index 00000000..f02e185d --- /dev/null +++ b/dhfs-parent/objects/src/test/resources/application.properties @@ -0,0 +1,6 @@ +dhfs.objects.persistence=memory +quarkus.log.category."com.usatiuk".level=TRACE +quarkus.log.category."com.usatiuk".min-level=TRACE +quarkus.http.test-port=0 +quarkus.http.test-ssl-port=0 +dhfs.objects.persistence.snapshot-extra-checks=true \ No newline at end of file diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index 8597d81c..18e6a84f 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -15,6 +15,8 @@ kleppmanntree supportlib autoprotomap + objects + utils @@ -54,6 +56,11 @@ 1.18.34 provided + + net.openhft + zero-allocation-hashing + 0.16 + org.awaitility awaitility @@ -71,6 +78,11 @@ commons-collections4 4.5.0-M2 + + org.pcollections + pcollections + 4.0.2 + @@ -109,6 +121,7 @@ --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED + --add-opens=java.base/java.nio=ALL-UNNAMED ${skip.unit} true diff --git a/dhfs-parent/server-old/.dockerignore b/dhfs-parent/server-old/.dockerignore new file mode 100644 index 00000000..94810d00 --- /dev/null +++ b/dhfs-parent/server-old/.dockerignore @@ -0,0 +1,5 @@ +* +!target/*-runner +!target/*-runner.jar +!target/lib/* +!target/quarkus-app/* \ No newline at end of file diff --git a/dhfs-parent/server-old/.gitignore b/dhfs-parent/server-old/.gitignore new file mode 100644 index 00000000..8c7863e7 --- /dev/null +++ b/dhfs-parent/server-old/.gitignore @@ -0,0 +1,43 @@ +#Maven +target/ +pom.xml.tag +pom.xml.releaseBackup +pom.xml.versionsBackup +release.properties +.flattened-pom.xml + +# Eclipse +.project +.classpath +.settings/ +bin/ + +# IntelliJ +.idea +*.ipr +*.iml +*.iws + +# NetBeans +nb-configuration.xml + +# Visual Studio Code +.vscode +.factorypath + +# OSX +.DS_Store + +# Vim +*.swp +*.swo + +# patch +*.orig +*.rej + +# Local environment +.env + +# Plugin directory +/.quarkus/cli/plugins/ diff --git a/dhfs-parent/server-old/Dockerfile b/dhfs-parent/server-old/Dockerfile new file mode 100644 index 00000000..62bace54 --- /dev/null +++ b/dhfs-parent/server-old/Dockerfile @@ -0,0 
+1,2 @@ +FROM azul/zulu-openjdk-debian:21-jre-latest +RUN apt update && apt install -y libfuse2 curl \ No newline at end of file diff --git a/dhfs-parent/server-old/docker-compose.yml b/dhfs-parent/server-old/docker-compose.yml new file mode 100644 index 00000000..a6a0aefa --- /dev/null +++ b/dhfs-parent/server-old/docker-compose.yml @@ -0,0 +1,42 @@ +version: "3.2" + +services: + dhfs1: + build: . + privileged: true + devices: + - /dev/fuse + volumes: + - $HOME/dhfs/dhfs1:/dhfs_root + - $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared + - ./target/quarkus-app:/app + command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED + -Ddhfs.objects.persistence.files.root=/dhfs_root/p + -Ddhfs.objects.root=/dhfs_root/d + -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0 + -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005 + -jar /app/quarkus-run.jar" + ports: + - 8080:8080 + - 8081:8443 + - 5005:5005 + dhfs2: + build: . + privileged: true + devices: + - /dev/fuse + volumes: + - $HOME/dhfs/dhfs2:/dhfs_root + - $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared + - ./target/quarkus-app:/app + command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED + --add-exports java.base/jdk.internal.access=ALL-UNNAMED + -Ddhfs.objects.persistence.files.root=/dhfs_root/p + -Ddhfs.objects.root=/dhfs_root/d + -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0 + -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010 + -jar /app/quarkus-run.jar" + ports: + - 8090:8080 + - 8091:8443 + - 5010:5010 diff --git a/dhfs-parent/server-old/pom.xml b/dhfs-parent/server-old/pom.xml new file mode 100644 index 00000000..bb74c72a --- /dev/null +++ b/dhfs-parent/server-old/pom.xml @@ -0,0 +1,209 @@ + + + 4.0.0 + com.usatiuk.dhfs + server + 1.0.0-SNAPSHOT + + + com.usatiuk.dhfs + parent + 1.0-SNAPSHOT + + + + + org.testcontainers + testcontainers + test + + + org.awaitility + awaitility + test + + + com.usatiuk + autoprotomap + 1.0-SNAPSHOT + + + com.usatiuk + autoprotomap-deployment + 1.0-SNAPSHOT + provided + + + org.bouncycastle + bcprov-jdk18on + 1.78.1 + + + org.bouncycastle + bcpkix-jdk18on + 1.78.1 + + + io.quarkus + quarkus-security + + + net.openhft + zero-allocation-hashing + + + io.quarkus + quarkus-grpc + + + io.quarkus + quarkus-arc + + + io.quarkus + quarkus-rest + + + io.quarkus + quarkus-rest-client + + + io.quarkus + quarkus-rest-client-jsonb + + + io.quarkus + quarkus-rest-jsonb + + + io.quarkus + quarkus-scheduler + + + io.quarkus + quarkus-junit5 + test + + + org.projectlombok + lombok + provided + + + com.github.SerCeMan + jnr-fuse + 44ed40f8ce + + + com.github.jnr + jnr-ffi + 2.2.16 + + + com.github.jnr + jnr-posix + 3.1.19 + + + com.github.jnr + jnr-constants + 0.10.4 + + + org.apache.commons + commons-lang3 + + + commons-io + commons-io + + + org.jboss.slf4j + slf4j-jboss-logmanager + test + + + commons-codec + commons-codec + + + org.apache.commons + commons-collections4 + + + org.apache.commons + commons-math3 + 3.6.1 + + + com.usatiuk + kleppmanntree + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + supportlib + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + objects + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + utils + 1.0-SNAPSHOT + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + 1C + false + classes + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + true + + + concurrent + + + 0.5 + + true + true + + + + + ${quarkus.platform.group-id} + quarkus-maven-plugin + ${quarkus.platform.version} + true + + + quarkus-plugin + + build + generate-code + 
generate-code-tests + + + + + + + diff --git a/dhfs-parent/kleppmanntree/src/lombok.config b/dhfs-parent/server-old/src/lombok.config similarity index 100% rename from dhfs-parent/kleppmanntree/src/lombok.config rename to dhfs-parent/server-old/src/lombok.config diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm b/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm new file mode 100644 index 00000000..b1de5988 --- /dev/null +++ b/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm @@ -0,0 +1,97 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode +# +# Before building the container image run: +# +# ./mvnw package +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/server-jvm +# +# If you want to include the debug port into your docker image +# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. +# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 +# when running the container +# +# Then run the container using : +# +# docker run -i --rm -p 8080:8080 quarkus/server-jvm +# +# This image uses the `run-java.sh` script to run the application. +# This scripts computes the command line to execute your Java application, and +# includes memory/GC tuning. +# You can configure the behavior using the following environment properties: +# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") +# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options +# in JAVA_OPTS (example: "-Dsome.property=foo") +# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is +# used to calculate a default maximal heap memory based on a containers restriction. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio +# of the container available memory as set here. The default is `50` which means 50% +# of the available memory is used as an upper boundary. You can skip this mechanism by +# setting this value to `0` in which case no `-Xmx` option is added. +# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This +# is used to calculate a default initial heap memory based on the maximum heap memory. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio +# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` +# is used as the initial heap size. You can skip this mechanism by setting this value +# to `0` in which case no `-Xms` option is added (example: "25") +# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. +# This is used to calculate the maximum value of the initial heap memory. If used in +# a container without any memory constraints for the container then this option has +# no effect. If there is a memory constraint then `-Xms` is limited to the value set +# here. The default is 4096MB which means the calculated value of `-Xms` never will +# be greater than 4096MB. 
The value of this variable is expressed in MB (example: "4096") +# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output +# when things are happening. This option, if set to true, will set +# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). +# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: +# true"). +# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). +# - CONTAINER_CORE_LIMIT: A calculated core limit as described in +# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") +# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). +# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. +# (example: "20") +# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. +# (example: "40") +# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. +# (example: "4") +# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus +# previous GC times. (example: "90") +# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") +# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") +# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should +# contain the necessary JRE command-line options to specify the required GC, which +# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). +# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") +# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") +# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be +# accessed directly. (example: "foo.example.com,bar.example.com") +# +### +FROM registry.access.redhat.com/ubi8/openjdk-21:1.18 + +ENV LANGUAGE='en_US:en' + + +# We make four distinct layers so if there are application changes the library layers can be re-used +COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ +COPY --chown=185 target/quarkus-app/*.jar /deployments/ +COPY --chown=185 target/quarkus-app/app/ /deployments/app/ +COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ + +EXPOSE 8080 +USER 185 +ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" +ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" + +ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] + diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar b/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar new file mode 100644 index 00000000..f66a1665 --- /dev/null +++ b/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar @@ -0,0 +1,93 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode +# +# Before building the container image run: +# +# ./mvnw package -Dquarkus.package.jar.type=legacy-jar +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar +# +# If you want to include the debug port into your docker image +# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. 
+# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 +# when running the container +# +# Then run the container using : +# +# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar +# +# This image uses the `run-java.sh` script to run the application. +# This scripts computes the command line to execute your Java application, and +# includes memory/GC tuning. +# You can configure the behavior using the following environment properties: +# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") +# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options +# in JAVA_OPTS (example: "-Dsome.property=foo") +# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is +# used to calculate a default maximal heap memory based on a containers restriction. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio +# of the container available memory as set here. The default is `50` which means 50% +# of the available memory is used as an upper boundary. You can skip this mechanism by +# setting this value to `0` in which case no `-Xmx` option is added. +# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This +# is used to calculate a default initial heap memory based on the maximum heap memory. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio +# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` +# is used as the initial heap size. You can skip this mechanism by setting this value +# to `0` in which case no `-Xms` option is added (example: "25") +# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. +# This is used to calculate the maximum value of the initial heap memory. If used in +# a container without any memory constraints for the container then this option has +# no effect. If there is a memory constraint then `-Xms` is limited to the value set +# here. The default is 4096MB which means the calculated value of `-Xms` never will +# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") +# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output +# when things are happening. This option, if set to true, will set +# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). +# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: +# true"). +# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). +# - CONTAINER_CORE_LIMIT: A calculated core limit as described in +# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") +# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). +# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. +# (example: "20") +# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. +# (example: "40") +# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. +# (example: "4") +# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus +# previous GC times. (example: "90") +# - GC_METASPACE_SIZE: The initial metaspace size. 
(example: "20") +# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") +# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should +# contain the necessary JRE command-line options to specify the required GC, which +# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). +# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") +# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") +# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be +# accessed directly. (example: "foo.example.com,bar.example.com") +# +### +FROM registry.access.redhat.com/ubi8/openjdk-21:1.18 + +ENV LANGUAGE='en_US:en' + + +COPY target/lib/* /deployments/lib/ +COPY target/*-runner.jar /deployments/quarkus-run.jar + +EXPOSE 8080 +USER 185 +ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" +ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" + +ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.native b/dhfs-parent/server-old/src/main/docker/Dockerfile.native new file mode 100644 index 00000000..226e7c71 --- /dev/null +++ b/dhfs-parent/server-old/src/main/docker/Dockerfile.native @@ -0,0 +1,27 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. +# +# Before building the container image run: +# +# ./mvnw package -Dnative +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.native -t quarkus/server . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/server +# +### +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9 +WORKDIR /work/ +RUN chown 1001 /work \ + && chmod "g+rwX" /work \ + && chown 1001:root /work +COPY --chown=1001:root target/*-runner /work/application + +EXPOSE 8080 +USER 1001 + +ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro b/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro new file mode 100644 index 00000000..4bd4c6de --- /dev/null +++ b/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro @@ -0,0 +1,30 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. +# It uses a micro base image, tuned for Quarkus native executables. +# It reduces the size of the resulting container image. +# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. +# +# Before building the container image run: +# +# ./mvnw package -Dnative +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server . 
+# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/server +# +### +FROM quay.io/quarkus/quarkus-micro-image:2.0 +WORKDIR /work/ +RUN chown 1001 /work \ + && chmod "g+rwX" /work \ + && chown 1001:root /work +COPY --chown=1001:root target/*-runner /work/application + +EXPOSE 8080 +USER 1001 + +ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/dhfs-parent/server-old/src/main/java/DeadlockDetector.java b/dhfs-parent/server-old/src/main/java/DeadlockDetector.java new file mode 100644 index 00000000..7b275098 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/DeadlockDetector.java @@ -0,0 +1,63 @@ +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; +import java.util.Arrays; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +@ApplicationScoped +public class DeadlockDetector { + private final ExecutorService _executor = Executors.newSingleThreadExecutor(); + + void init(@Observes @Priority(1) StartupEvent event) { + _executor.submit(this::run); + } + + void shutdown(@Observes @Priority(100000) ShutdownEvent event) { + _executor.shutdownNow(); + } + + private void run() { + ThreadMXBean bean = ManagementFactory.getThreadMXBean(); + try { + while (!Thread.interrupted()) { + Thread.sleep(4000); + + long[] threadIds = bean.findDeadlockedThreads(); // Returns null if no threads are deadlocked. + + if (threadIds != null) { + ThreadInfo[] infos = bean.getThreadInfo(threadIds, Integer.MAX_VALUE); + + StringBuilder sb = new StringBuilder(); + + sb.append("Deadlock detected!\n"); + + for (ThreadInfo info : infos) { + StackTraceElement[] stack = info.getStackTrace(); + sb.append(info.getThreadName()).append("\n"); + sb.append("getLockedMonitors: ").append(Arrays.toString(info.getLockedMonitors())).append("\n"); + sb.append("getLockedSynchronizers: ").append(Arrays.toString(info.getLockedSynchronizers())).append("\n"); + sb.append("waiting on: ").append(info.getLockInfo()).append("\n"); + sb.append("locked by: ").append(info.getLockOwnerName()).append("\n"); + sb.append("Stack trace:\n"); + for (var e : stack) { + sb.append(e.toString()).append("\n"); + } + sb.append("==="); + } + + Log.error(sb); + } + } + } catch (InterruptedException e) { + } + Log.info("Deadlock detector thread exiting"); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java new file mode 100644 index 00000000..69e488c0 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java @@ -0,0 +1,21 @@ +package com.usatiuk.dhfs; + +import io.quarkus.runtime.Quarkus; +import io.quarkus.runtime.QuarkusApplication; +import io.quarkus.runtime.annotations.QuarkusMain; + +@QuarkusMain +public class Main { + public static void main(String... args) { + Quarkus.run(DhfsStorageServerApp.class, args); + } + + public static class DhfsStorageServerApp implements QuarkusApplication { + + @Override + public int run(String... 
args) throws Exception { + Quarkus.waitForExit(); + return 0; + } + } +} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java new file mode 100644 index 00000000..dcd379a8 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java @@ -0,0 +1,42 @@ +package com.usatiuk.dhfs; + +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.nio.file.Paths; + +@ApplicationScoped +public class ShutdownChecker { + private static final String dataFileName = "running"; + @ConfigProperty(name = "dhfs.objects.root") + String dataRoot; + boolean _cleanShutdown = true; + boolean _initialized = false; + + void init(@Observes @Priority(2) StartupEvent event) throws IOException { + Paths.get(dataRoot).toFile().mkdirs(); + Log.info("Initializing with root " + dataRoot); + if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) { + _cleanShutdown = false; + Log.error("Unclean shutdown detected!"); + } else { + Paths.get(dataRoot).resolve(dataFileName).toFile().createNewFile(); + } + _initialized = true; + } + + void shutdown(@Observes @Priority(100000) ShutdownEvent event) throws IOException { + Paths.get(dataRoot).resolve(dataFileName).toFile().delete(); + } + + public boolean lastShutdownClean() { + if (!_initialized) throw new IllegalStateException("ShutdownChecker not initialized"); + return _cleanShutdown; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java new file mode 100644 index 00000000..46f8e283 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -0,0 +1,90 @@ +package com.usatiuk.dhfs.files.objects; + +import 
com.google.protobuf.ByteString; +import com.usatiuk.dhfs.files.conflicts.NoOpConflictResolver; +import com.usatiuk.dhfs.objects.jrepository.AssumedUnique; +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.jrepository.Leaf; +import com.usatiuk.dhfs.objects.persistence.ChunkDataP; +import com.usatiuk.dhfs.objects.repository.ConflictResolver; +import net.openhft.hashing.LongTupleHashFunction; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +@AssumedUnique +@Leaf +public class ChunkData extends JObjectData { + final ChunkDataP _data; + + public ChunkData(ByteString bytes) { + super(); + _data = ChunkDataP.newBuilder() + .setData(bytes) + // TODO: There is most likely a copy happening here + .setName(Arrays.stream(LongTupleHashFunction.xx128().hashBytes(bytes.asReadOnlyByteBuffer())) + .mapToObj(Long::toHexString).collect(Collectors.joining())) + .build(); + } + + public ChunkData(ByteString bytes, String name) { + super(); + _data = ChunkDataP.newBuilder() + .setData(bytes) + .setName(name) + .build(); + } + + public ChunkData(ChunkDataP chunkDataP) { + super(); + _data = chunkDataP; + } + + ChunkDataP getData() { + return _data; + } + + public ByteString getBytes() { + return _data.getData(); + } + + public int getSize() { + return _data.getData().size(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ChunkData chunkData = (ChunkData) o; + return Objects.equals(getName(), chunkData.getName()); + } + + @Override + public int hashCode() { + return Objects.hashCode(getName()); + } + + @Override + public String getName() { + return _data.getName(); + } + + @Override + public Class<? extends ConflictResolver> getConflictResolver() { + return NoOpConflictResolver.class; + } + + @Override + public Collection<String> extractRefs() { + return List.of(); + } + + @Override + public int estimateSize() { + return _data.getData().size(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java new file mode 100644 index 00000000..0c6fa4e8 --- /dev/null +++ 
b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -0,0 +1,51 @@ +package com.usatiuk.dhfs.files.objects; + +import com.usatiuk.dhfs.files.conflicts.FileConflictResolver; +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.repository.ConflictResolver; +import lombok.Getter; +import lombok.Setter; + +import java.util.*; + +public class File extends FsNode { + @Getter + private final NavigableMap<Long, String> _chunks; + @Getter + private final boolean _symlink; + @Getter + @Setter + private long _size = 0; + + public File(UUID uuid, long mode, boolean symlink) { + super(uuid, mode); + _symlink = symlink; + _chunks = new TreeMap<>(); + } + + public File(UUID uuid, long mode, boolean symlink, NavigableMap<Long, String> chunks) { + super(uuid, mode); + _symlink = symlink; + _chunks = chunks; + } + + @Override + public Class<? extends ConflictResolver> getConflictResolver() { + return FileConflictResolver.class; + } + + @Override + public Class<? extends JObjectData> getRefType() { + return ChunkData.class; + } + + @Override + public Collection<String> extractRefs() { + return Collections.unmodifiableCollection(_chunks.values()); + } + + @Override + public int estimateSize() { + return _chunks.size() * 192; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java new file mode 100644 index 00000000..58678dd2 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java @@ -0,0 +1,51 @@ +package com.usatiuk.dhfs.files.service; + +import com.google.protobuf.ByteString; +import com.google.protobuf.UnsafeByteOperations; +import com.usatiuk.dhfs.files.objects.File; +import com.usatiuk.dhfs.objects.jrepository.JObject; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.Optional; + +public interface DhfsFileService { + Optional<String> open(String name); + + Optional<String> create(String name, long mode); + + Pair<String, String> inoToParent(String ino); + + void mkdir(String name, long mode); + + Optional<GetattrRes> getattr(String name); + + Boolean chmod(String name, long mode); + + void unlink(String name); + + Boolean rename(String from, String to); + + Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs); + + Iterable<String> readDir(String name); + + void updateFileSize(JObject<File> file); + + Long size(String f); + + Optional<ByteString> read(String fileUuid, long offset, int length); + + Long write(String fileUuid, long offset, ByteString data); + + default Long write(String fileUuid, long offset, byte[] data) { + return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data)); + } + + Boolean truncate(String fileUuid, long length); + + String readlink(String uuid); + + ByteString readlinkBS(String 
uuid); + + String symlink(String oldpath, String newpath); +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java new file mode 100644 index 00000000..33b30d85 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -0,0 +1,814 @@ +package com.usatiuk.dhfs.files.service; + +import com.google.protobuf.ByteString; +import com.google.protobuf.UnsafeByteOperations; +import com.usatiuk.dhfs.files.objects.ChunkData; +import com.usatiuk.dhfs.files.objects.File; +import com.usatiuk.dhfs.files.objects.FsNode; +import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; +import com.usatiuk.dhfs.objects.jrepository.JMutator; +import com.usatiuk.dhfs.objects.jrepository.JObject; +import com.usatiuk.dhfs.objects.jrepository.JObjectManager; +import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.*; +import java.util.stream.StreamSupport; + +@ApplicationScoped +public class DhfsFileServiceImpl implements DhfsFileService { + @Inject + JObjectManager jObjectManager; + @Inject + JObjectTxManager jObjectTxManager; + + @ConfigProperty(name = "dhfs.files.target_chunk_size") + int targetChunkSize; + + @ConfigProperty(name = "dhfs.files.write_merge_threshold") + float writeMergeThreshold; + + @ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take") + float writeMergeMaxChunkToTake; + + @ConfigProperty(name = "dhfs.files.write_merge_limit") + float writeMergeLimit; + + @ConfigProperty(name = "dhfs.files.write_last_chunk_limit") + float writeLastChunkLimit; + + @ConfigProperty(name = "dhfs.files.use_hash_for_chunks") + boolean useHashForChunks; + + @ConfigProperty(name = "dhfs.files.allow_recursive_delete") + boolean allowRecursiveDelete; + + @ConfigProperty(name = "dhfs.objects.ref_verification") + boolean refVerification; + + @ConfigProperty(name = "dhfs.objects.write_log") + boolean writeLogging; + + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + JKleppmannTreeManager jKleppmannTreeManager; + + private JKleppmannTreeManager.JKleppmannTree _tree; + + private ChunkData createChunk(ByteString bytes) { + if (useHashForChunks) { + return new ChunkData(bytes); + } else { + return new ChunkData(bytes, persistentPeerDataService.getUniqueId()); + } + } + + void init(@Observes @Priority(500) StartupEvent event) { + Log.info("Initializing file service"); + _tree = jKleppmannTreeManager.getTree("fs"); + } + + 
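// Name resolution goes through the "fs" Kleppmann tree: a path is traversed + // component by component to a tree node id, and that node's backing jObject + // (a JKleppmannTreeNode) carries the metadata (file ino or directory) used below. + 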
private JObject<JKleppmannTreeNode> getDirEntry(String name) { + var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); + if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND); + var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); + if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) + throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); + return (JObject<JKleppmannTreeNode>) ret; + } + + private Optional<JObject<JKleppmannTreeNode>> getDirEntryOpt(String name) { + var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); + if (res == null) return Optional.empty(); + var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); + if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) + throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); + return Optional.of((JObject<JKleppmannTreeNode>) ret); + } + + @Override + public Optional<GetattrRes> getattr(String uuid) { + return jObjectTxManager.executeTx(() -> { + var ref = jObjectManager.get(uuid); + if (ref.isEmpty()) return Optional.empty(); + return ref.get().runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { + GetattrRes ret; + if (d instanceof File f) { + ret = new GetattrRes(f.getMtime(), f.getCtime(), f.getMode(), f.isSymlink() ? GetattrType.SYMLINK : GetattrType.FILE); + } else if (d instanceof JKleppmannTreeNode) { + ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY); + } else { + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); + } + return Optional.of(ret); + }); + }); + } + + @Override + public Optional<String> open(String name) { + return jObjectTxManager.executeTx(() -> { + try { + var ret = getDirEntry(name); + return Optional.of(ret.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) return f.getFileIno(); + else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return m.getName(); + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); + })); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.NOT_FOUND) { + return Optional.empty(); + } + throw e; + } + }); + } + + private void ensureDir(JObject<JKleppmannTreeNode> entry) { + entry.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { + if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) + throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription(m.getName() + " is a file, not directory")); + else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return null; + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); + }); + } + + @Override + public Optional<String> create(String name, long mode) { + return jObjectTxManager.executeTx(() -> { + Path path = Path.of(name); + var parent = getDirEntry(path.getParent().toString()); + + ensureDir(parent); + + String fname = path.getFileName().toString(); + + var fuuid = UUID.randomUUID(); + Log.debug("Creating file " + fuuid); + File f = new 
File(fuuid, mode, false);
+
+ var newNodeId = _tree.getNewNodeId();
+ var fobj = jObjectManager.putLocked(f, Optional.of(newNodeId));
+ try {
+ _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId);
+ } catch (Exception e) {
+ fobj.getMeta().removeRef(newNodeId);
+ throw e;
+ } finally {
+ fobj.rwUnlock();
+ }
+ return Optional.of(f.getName());
+ });
+ }
+
+ //FIXME: Slow..
+ @Override
+ public Pair<String, String> inoToParent(String ino) {
+ return jObjectTxManager.executeTx(() -> {
+ return _tree.findParent(w -> {
+ if (w.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f)
+ if (f.getFileIno().equals(ino))
+ return true;
+ return false;
+ });
+ });
+ }
+
+ @Override
+ public void mkdir(String name, long mode) {
+ jObjectTxManager.executeTx(() -> {
+ Path path = Path.of(name);
+ var parent = getDirEntry(path.getParent().toString());
+ ensureDir(parent);
+
+ String dname = path.getFileName().toString();
+
+ Log.debug("Creating directory " + name);
+
+ _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaDirectory(dname), _tree.getNewNodeId());
+ });
+ }
+
+ @Override
+ public void unlink(String name) {
+ jObjectTxManager.executeTx(() -> {
+ var node = getDirEntryOpt(name).orElse(null);
+ JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
+ if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f)
+ if (!d.getNode().getChildren().isEmpty()) throw new DirectoryNotEmptyException();
+ return d.getNode().getMeta();
+ });
+
+ _tree.trash(meta, node.getMeta().getName());
+ });
+ }
+
+ @Override
+ public Boolean rename(String from, String to) {
+ return jObjectTxManager.executeTx(() -> {
+ var node = getDirEntry(from);
+ JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> d.getNode().getMeta());
+
+ var toPath = Path.of(to);
+ var toDentry = getDirEntry(toPath.getParent().toString());
+ ensureDir(toDentry);
+
+ _tree.move(toDentry.getMeta().getName(), meta.withName(toPath.getFileName().toString()), node.getMeta().getName());
+
+ return true;
+ });
+ }
+
+ @Override
+ public Boolean chmod(String uuid, long mode) {
+ return jObjectTxManager.executeTx(() -> {
+ var dent = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
+
+ dent.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d, bump, i) -> {
+ if (d instanceof JKleppmannTreeNode) {
+ return null;//FIXME:?
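+ // (tree nodes carry no mode bits, so chmod on a directory is
+ // accepted but ignored; see the FIXME above)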
} else if (d instanceof File f) {
+ bump.apply();
+ f.setMtime(System.currentTimeMillis());
+ f.setMode(mode);
+ } else {
+ throw new IllegalArgumentException(uuid + " is not a file");
+ }
+ return null;
+ });
+
+ return true;
+ });
+ }
+
+ @Override
+ public Iterable<String> readDir(String name) {
+ return jObjectTxManager.executeTx(() -> {
+ var found = getDirEntry(name);
+
+ return found.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
+ if (!(d instanceof JKleppmannTreeNode) || !(d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory)) {
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+ }
+ return new ArrayList<>(d.getNode().getChildren().keySet());
+ });
+ });
+ }
+
+ @Override
+ public Optional<ByteString> read(String fileUuid, long offset, int length) {
+ return jObjectTxManager.executeTx(() -> {
+ if (length < 0)
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should not be negative: " + length));
+ if (offset < 0)
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should not be negative: " + offset));
+
+ var fileOpt = jObjectManager.get(fileUuid);
+ if (fileOpt.isEmpty()) {
+ Log.error("File not found when trying to read: " + fileUuid);
+ return Optional.empty();
+ }
+ var file = fileOpt.get();
+
+ try {
+ return file.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> {
+ if (!(fileData instanceof File)) {
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+ }
+ var chunksAll = ((File) fileData).getChunks();
+ if (chunksAll.isEmpty()) {
+ return Optional.of(ByteString.empty());
+ }
+ var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet();
+
+ if (chunksList.isEmpty()) {
+ return Optional.of(ByteString.empty());
+ }
+
+ var chunks = chunksList.iterator();
+ ByteString buf = ByteString.empty();
+
+ long curPos = offset;
+ var chunk = chunks.next();
+
+ while (curPos < offset + length) {
+ var chunkPos = chunk.getKey();
+
+ long offInChunk = curPos - chunkPos;
+
+ long toReadInChunk = (offset + length) - curPos;
+
+ var chunkBytes = readChunk(chunk.getValue());
+
+ long readableLen = chunkBytes.size() - offInChunk;
+
+ var toReadReally = Math.min(readableLen, toReadInChunk);
+
+ if (toReadReally < 0) break;
+
+ buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));
+
+ curPos += toReadReally;
+
+ if (readableLen > toReadInChunk)
+ break;
+
+ if (!chunks.hasNext()) break;
+
+ chunk = chunks.next();
+ }
+
+ // FIXME:
+ return Optional.of(buf);
+ });
+ } catch (Exception e) {
+ Log.error("Error reading file: " + fileUuid, e);
+ return Optional.empty();
+ }
+ });
+ }
+
+ private ByteString readChunk(String uuid) {
+ var chunkRead = jObjectManager.get(uuid).orElse(null);
+
+ if (chunkRead == null) {
+ Log.error("Chunk requested not found: " + uuid);
+ throw new StatusRuntimeException(Status.NOT_FOUND);
+ }
+
+ return chunkRead.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
+ if (!(d instanceof ChunkData cd))
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+ return cd.getBytes();
+ });
+ }
+
+ private int getChunkSize(String uuid) {
+ return readChunk(uuid).size();
+ }
+
+ private void cleanupChunks(File f, Collection<String> uuids) {
+ // FIXME:
+ var inFile = useHashForChunks ?
new HashSet<>(f.getChunks().values()) : Collections.emptySet();
+ for (var cuuid : uuids) {
+ try {
+ if (inFile.contains(cuuid)) continue;
+ jObjectManager.get(cuuid)
+ .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION,
+ (m, d, b, v) -> {
+ m.removeRef(f.getName());
+ return null;
+ }));
+ } catch (Exception e) {
+ Log.error("Error when cleaning chunk " + cuuid, e);
+ }
+ }
+ }
+
+ @Override
+ public Long write(String fileUuid, long offset, ByteString data) {
+ return jObjectTxManager.executeTx(() -> {
+ if (offset < 0)
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should not be negative: " + offset));
+
+ // FIXME:
+ var file = (JObject<File>) jObjectManager.get(fileUuid).orElse(null);
+ if (file == null) {
+ Log.error("File not found when trying to write: " + fileUuid);
+ return -1L;
+ }
+
+ file.rwLockNoCopy();
+ try {
+ file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
+ // FIXME:
+ if (!(file.getData() instanceof File))
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+
+ if (writeLogging) {
+ Log.info("Writing to file: " + file.getMeta().getName() + " size=" + size(fileUuid) + " "
+ + offset + " " + data.size());
+ }
+
+ if (size(fileUuid) < offset)
+ truncate(fileUuid, offset);
+
+ // FIXME: Some kind of immutable interface?
+ var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks());
+ var first = chunksAll.floorEntry(offset);
+ var last = chunksAll.lowerEntry(offset + data.size());
+ NavigableMap<Long, String> removedChunks = new TreeMap<>();
+
+ long start = 0;
+
+ NavigableMap<Long, String> beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap();
+ NavigableMap<Long, String> afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap();
+
+ if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) {
+ beforeFirst = chunksAll;
+ afterLast = Collections.emptyNavigableMap();
+ first = null;
+ last = null;
+ start = offset;
+ } else if (!chunksAll.isEmpty()) {
+ var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true);
+ removedChunks.putAll(between);
+ start = first.getKey();
+ }
+
+ ByteString pendingWrites = ByteString.empty();
+
+ if (first != null && first.getKey() < offset) {
+ var chunkBytes = readChunk(first.getValue());
+ pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey())));
+ }
+ pendingWrites = pendingWrites.concat(data);
+
+ if (last != null) {
+ var lchunkBytes = readChunk(last.getValue());
+ if (last.getKey() + lchunkBytes.size() > offset + data.size()) {
+ var startInFile = offset + data.size();
+ var startInChunk = startInFile - last.getKey();
+ pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size()));
+ }
+ }
+
+ int combinedSize = pendingWrites.size();
+
+ if (targetChunkSize > 0) {
+ if (combinedSize < (targetChunkSize * writeMergeThreshold)) {
+ boolean leftDone = false;
+ boolean rightDone = false;
+ while (!leftDone && !rightDone) {
+ if (beforeFirst.isEmpty()) leftDone = true;
+ if (!beforeFirst.isEmpty() || !leftDone) {
+ var takeLeft = beforeFirst.lastEntry();
+
+ var cuuid = takeLeft.getValue();
+
+ if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
+ leftDone = true;
+ continue;
+ }
+
+ if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
+ leftDone = true;
+ continue;
+ }
+
+ // FIXME: (and test this)
+ beforeFirst
= beforeFirst.headMap(takeLeft.getKey(), false);
+ start = takeLeft.getKey();
+ pendingWrites = readChunk(cuuid).concat(pendingWrites);
+ combinedSize += getChunkSize(cuuid);
+ removedChunks.put(takeLeft.getKey(), takeLeft.getValue());
+ }
+ if (afterLast.isEmpty()) rightDone = true;
+ if (!afterLast.isEmpty() && !rightDone) {
+ var takeRight = afterLast.firstEntry();
+
+ var cuuid = takeRight.getValue();
+
+ if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
+ rightDone = true;
+ continue;
+ }
+
+ if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
+ rightDone = true;
+ continue;
+ }
+
+ // FIXME: (and test this)
+ afterLast = afterLast.tailMap(takeRight.getKey(), false);
+ pendingWrites = pendingWrites.concat(readChunk(cuuid));
+ combinedSize += getChunkSize(cuuid);
+ removedChunks.put(takeRight.getKey(), takeRight.getValue());
+ }
+ }
+ }
+ }
+
+ NavigableMap<Long, String> newChunks = new TreeMap<>();
+
+ {
+ int cur = 0;
+ while (cur < combinedSize) {
+ int end;
+
+ if (targetChunkSize <= 0)
+ end = combinedSize;
+ else {
+ if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) {
+ end = Math.min(cur + targetChunkSize, combinedSize);
+ } else {
+ end = combinedSize;
+ }
+ }
+
+ var thisChunk = pendingWrites.substring(cur, end);
+
+ ChunkData newChunkData = createChunk(thisChunk);
+ //FIXME:
+ jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName()));
+ newChunks.put(start, newChunkData.getName());
+
+ start += thisChunk.size();
+ cur = end;
+ }
+ }
+
+ file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks));
+
+ cleanupChunks(file.getData(), removedChunks.values());
+ updateFileSize((JObject<File>) file);
+ } finally {
+ file.rwUnlock();
+ }
+
+ return (long) data.size();
+ });
+ }
+
+ @Override
+ public Boolean truncate(String fileUuid, long length) {
+ return jObjectTxManager.executeTx(() -> {
+ if (length < 0)
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should not be negative: " + length));
+
+ var file = (JObject<File>) jObjectManager.get(fileUuid).orElse(null);
+ if (file == null) {
+ Log.error("File not found when trying to truncate: " + fileUuid);
+ return false;
+ }
+
+ if (length == 0) {
+ file.rwLockNoCopy();
+ try {
+ file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
+
+ var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.getData().getChunks()));
+
+ file.mutate(new JMutator<>() {
+ long oldMtime;
+
+ @Override
+ public boolean mutate(File object) {
+ oldMtime = object.getMtime();
+ object.getChunks().clear();
+ return true;
+ }
+
+ @Override
+ public void revert(File object) {
+ object.setMtime(oldMtime);
+ object.getChunks().putAll(oldChunks);
+ }
+ });
+ cleanupChunks(file.getData(), oldChunks.values());
+ updateFileSize((JObject<File>) file);
+ } catch (Exception e) {
+ Log.error("Error writing file chunks: " + fileUuid, e);
+ return false;
+ } finally {
+ file.rwUnlock();
+ }
+ return true;
+ }
+
+ file.rwLockNoCopy();
+ try {
+ file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
+
+ var curSize = size(fileUuid);
+ if (curSize == length) return true;
+
+ var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks());
+ NavigableMap<Long, String> removedChunks = new TreeMap<>();
+ NavigableMap<Long, String> newChunks = new TreeMap<>();
+
+ if (curSize < length) {
+ long combinedSize = (length - curSize);
+
+ long start = curSize;
+
+ // Hack
+ HashMap<Long, ByteString> zeroCache = new HashMap<>();
+
+ {
+ long cur = 0;
+
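+ // Grow case: cover [curSize, length) with zero-filled chunks of
+ // roughly targetChunkSize each; zeroCache reuses the zero buffer
+ // of a given size between loop iterations.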
while (cur < combinedSize) { + long end; + + if (targetChunkSize <= 0) + end = combinedSize; + else { + if ((combinedSize - cur) > (targetChunkSize * 1.5)) { + end = cur + targetChunkSize; + } else { + end = combinedSize; + } + } + + if (!zeroCache.containsKey(end - cur)) + zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])); + + ChunkData newChunkData = createChunk(zeroCache.get(end - cur)); + //FIXME: + jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); + newChunks.put(start, newChunkData.getName()); + + start += newChunkData.getSize(); + cur = end; + } + } + } else { + var tail = chunksAll.lowerEntry(length); + var afterTail = chunksAll.tailMap(tail.getKey(), false); + + removedChunks.put(tail.getKey(), tail.getValue()); + removedChunks.putAll(afterTail); + + var tailBytes = readChunk(tail.getValue()); + var newChunk = tailBytes.substring(0, (int) (length - tail.getKey())); + + ChunkData newChunkData = createChunk(newChunk); + //FIXME: + jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); + newChunks.put(tail.getKey(), newChunkData.getName()); + } + + file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks)); + + cleanupChunks(file.getData(), removedChunks.values()); + updateFileSize((JObject) file); + return true; + } catch (Exception e) { + Log.error("Error reading file: " + fileUuid, e); + return false; + } finally { + file.rwUnlock(); + } + }); + } + + @Override + public String readlink(String uuid) { + return jObjectTxManager.executeTx(() -> { + return readlinkBS(uuid).toStringUtf8(); + }); + } + + @Override + public ByteString readlinkBS(String uuid) { + return jObjectTxManager.executeTx(() -> { + var fileOpt = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); + + return fileOpt.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> { + if (!(fileData instanceof File)) { + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + } + + if (!((File) fileData).isSymlink()) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Not a symlink: " + uuid)); + + return read(uuid, 0, Math.toIntExact(size(uuid))).get(); + }); + }); + } + + @Override + public String symlink(String oldpath, String newpath) { + return jObjectTxManager.executeTx(() -> { + Path path = Path.of(newpath); + var parent = getDirEntry(path.getParent().toString()); + + ensureDir(parent); + + String fname = path.getFileName().toString(); + + var fuuid = UUID.randomUUID(); + Log.debug("Creating file " + fuuid); + + File f = new File(fuuid, 0, true); + var newNodeId = _tree.getNewNodeId(); + ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8))); + + f.getChunks().put(0L, newChunkData.getName()); + + jObjectManager.put(newChunkData, Optional.of(f.getName())); + var newFile = jObjectManager.putLocked(f, Optional.of(newNodeId)); + try { + updateFileSize(newFile); + } finally { + newFile.rwUnlock(); + } + + _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId); + return f.getName(); + }); + } + + @Override + public Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs) { + return jObjectTxManager.executeTx(() -> { + var file = jObjectManager.get(fileUuid).orElseThrow( + () -> new 
StatusRuntimeException(Status.NOT_FOUND.withDescription(
+ "File not found for setTimes: " + fileUuid))
+ );
+
+ file.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, fileData, bump, i) -> {
+ if (fileData instanceof JKleppmannTreeNode) return null; // FIXME:
+ if (!(fileData instanceof FsNode fd))
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+
+ bump.apply();
+ fd.setMtime(mtimeMs);
+ return null;
+ });
+
+ return true;
+ });
+ }
+
+ @Override
+ public void updateFileSize(JObject<File> file) {
+ jObjectTxManager.executeTx(() -> {
+ file.rwLockNoCopy();
+ try {
+ file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
+ if (!(file.getData() instanceof File fd))
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+
+ long realSize = 0;
+
+ var last = fd.getChunks().lastEntry();
+ if (last != null) {
+ var lastSize = getChunkSize(last.getValue());
+ realSize = last.getKey() + lastSize;
+ }
+
+ if (realSize != fd.getSize()) {
+ long finalRealSize = realSize;
+ file.mutate(new JMutator<File>() {
+ long oldSize;
+
+ @Override
+ public boolean mutate(File object) {
+ oldSize = object.getSize();
+ object.setSize(finalRealSize);
+ return true;
+ }
+
+ @Override
+ public void revert(File object) {
+ object.setSize(oldSize);
+ }
+ });
+ }
+ } catch (Exception e) {
+ Log.error("Error updating file size: " + file.getMeta().getName(), e);
+ } finally {
+ file.rwUnlock();
+ }
+ });
+ }
+
+ @Override
+ public Long size(String uuid) {
+ return jObjectTxManager.executeTx(() -> {
+ var read = jObjectManager.get(uuid)
+ .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));
+
+ try {
+ return read.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (fsNodeData, fileData) -> {
+ if (!(fileData instanceof File fd))
+ throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+
+ return fd.getSize();
+ });
+ } catch (Exception e) {
+ Log.error("Error reading file: " + uuid, e);
+ return -1L;
+ }
+ });
+ }
+} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java new file mode 100644 index 00000000..f13096f9 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java @@ -0,0 +1,8 @@ +package com.usatiuk.dhfs.files.service; +
+public class DirectoryNotEmptyException extends RuntimeException {
+ @Override
+ public synchronized Throwable fillInStackTrace() {
+ return this;
+ }
+} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java new file mode 100644 index 00000000..3240a6b4 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.files.service; +
+public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
+} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java
b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java new file mode 100644 index 00000000..ebcd4868 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.files.service; + +public enum GetattrType { + FILE, + DIRECTORY, + SYMLINK +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java new file mode 100644 index 00000000..0fa8ee29 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java @@ -0,0 +1,391 @@ +package com.usatiuk.dhfs.fuse; + +import com.google.protobuf.UnsafeByteOperations; +import com.sun.security.auth.module.UnixSystem; +import com.usatiuk.dhfs.files.service.DhfsFileService; +import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException; +import com.usatiuk.dhfs.files.service.GetattrRes; +import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; +import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; +import com.usatiuk.kleppmanntree.AlreadyExistsException; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import jnr.ffi.Pointer; +import org.apache.commons.lang3.SystemUtils; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import ru.serce.jnrfuse.ErrorCodes; +import ru.serce.jnrfuse.FuseFillDir; +import ru.serce.jnrfuse.FuseStubFS; +import ru.serce.jnrfuse.struct.FileStat; +import ru.serce.jnrfuse.struct.FuseFileInfo; +import ru.serce.jnrfuse.struct.Statvfs; +import ru.serce.jnrfuse.struct.Timespec; + +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Optional; + +import static jnr.posix.FileStat.*; + +@ApplicationScoped +public class DhfsFuse extends FuseStubFS { + private static final int blksize = 1048576; + private static final int iosize = 1048576; + @Inject + ObjectPersistentStore persistentStore; // FIXME? 
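+ // (the persistent store is only consulted by statfs() below, to report
+ // the total/free/usable space of the backing object storage)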
+ @ConfigProperty(name = "dhfs.fuse.root") + String root; + @ConfigProperty(name = "dhfs.fuse.enabled") + boolean enabled; + @ConfigProperty(name = "dhfs.fuse.debug") + Boolean debug; + @ConfigProperty(name = "dhfs.files.target_chunk_size") + int targetChunkSize; + @Inject + JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors; + @Inject + DhfsFileService fileService; + + void init(@Observes @Priority(100000) StartupEvent event) { + if (!enabled) return; + Paths.get(root).toFile().mkdirs(); + Log.info("Mounting with root " + root); + + var uid = new UnixSystem().getUid(); + var gid = new UnixSystem().getGid(); + + var opts = new ArrayList(); + + // Assuming macFuse + if (SystemUtils.IS_OS_MAC) { + opts.add("-o"); + opts.add("iosize=" + iosize); + } else if (SystemUtils.IS_OS_LINUX) { + // FIXME: There's something else missing: the writes still seem to be 32k max +// opts.add("-o"); +// opts.add("large_read"); + opts.add("-o"); + opts.add("big_writes"); + opts.add("-o"); + opts.add("max_read=" + iosize); + opts.add("-o"); + opts.add("max_write=" + iosize); + } + opts.add("-o"); + opts.add("auto_cache"); + opts.add("-o"); + opts.add("uid=" + uid); + opts.add("-o"); + opts.add("gid=" + gid); + + mount(Paths.get(root), false, debug, opts.toArray(String[]::new)); + } + + void shutdown(@Observes @Priority(1) ShutdownEvent event) { + if (!enabled) return; + Log.info("Unmounting"); + umount(); + Log.info("Unmounted"); + } + + @Override + public int statfs(String path, Statvfs stbuf) { + try { + stbuf.f_frsize.set(blksize); + stbuf.f_bsize.set(blksize); + stbuf.f_blocks.set(persistentStore.getTotalSpace() / blksize); // total data blocks in file system + stbuf.f_bfree.set(persistentStore.getFreeSpace() / blksize); // free blocks in fs + stbuf.f_bavail.set(persistentStore.getUsableSpace() / blksize); // avail blocks in fs + stbuf.f_files.set(1000); //FIXME: + stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME: + stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME: + stbuf.f_namemax.set(2048); + return super.statfs(path, stbuf); + } catch (Exception e) { + Log.error("When statfs " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int getattr(String path, FileStat stat) { + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var uuid = fileOpt.get(); + Optional found = fileService.getattr(uuid); + if (found.isEmpty()) { + return -ErrorCodes.ENOENT(); + } + switch (found.get().type()) { + case FILE -> { + stat.st_mode.set(S_IFREG | found.get().mode()); + stat.st_nlink.set(1); + stat.st_size.set(fileService.size(uuid)); + } + case DIRECTORY -> { + stat.st_mode.set(S_IFDIR | found.get().mode()); + stat.st_nlink.set(2); + } + case SYMLINK -> { + stat.st_mode.set(S_IFLNK | 0777); + stat.st_nlink.set(1); + stat.st_size.set(fileService.size(uuid)); + } + } + + // FIXME: Race? 
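+ // Times are kept internally in milliseconds, while FUSE timespecs take
+ // seconds plus nanoseconds: e.g. mtime 1700000000123 ms maps to tv_sec
+ // 1700000000 and tv_nsec 123000000. Note that the "* 1000" below turns
+ // the millisecond remainder into microseconds, so it likely wants
+ // "* 1000000" instead.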
+ stat.st_ctim.tv_sec.set(found.get().ctime() / 1000); + stat.st_ctim.tv_nsec.set((found.get().ctime() % 1000) * 1000); + stat.st_mtim.tv_sec.set(found.get().mtime() / 1000); + stat.st_mtim.tv_nsec.set((found.get().mtime() % 1000) * 1000); + stat.st_atim.tv_sec.set(found.get().mtime() / 1000); + stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000); + stat.st_blksize.set(blksize); + } catch (Exception e) { + Log.error("When getattr " + path, e); + return -ErrorCodes.EIO(); + } catch (Throwable e) { + Log.error("When getattr " + path, e); + return -ErrorCodes.EIO(); + } + return 0; + } + + @Override + public int utimens(String path, Timespec[] timespec) { + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var file = fileOpt.get(); + var res = fileService.setTimes(file, + timespec[0].tv_sec.get() * 1000, + timespec[1].tv_sec.get() * 1000); + if (!res) return -ErrorCodes.EINVAL(); + else return 0; + } catch (Exception e) { + Log.error("When utimens " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int open(String path, FuseFileInfo fi) { + try { + if (fileService.open(path).isEmpty()) return -ErrorCodes.ENOENT(); + return 0; + } catch (Exception e) { + Log.error("When open " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) { + if (size < 0) return -ErrorCodes.EINVAL(); + if (offset < 0) return -ErrorCodes.EINVAL(); + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var file = fileOpt.get(); + var read = fileService.read(fileOpt.get(), offset, (int) size); + if (read.isEmpty()) return 0; + UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size)); + return read.get().size(); + } catch (Exception e) { + Log.error("When reading " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) { + if (offset < 0) return -ErrorCodes.EINVAL(); + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var buffer = UninitializedByteBuffer.allocateUninitialized((int) size); + + jnrPtrByteOutputAccessors.getUnsafe().copyMemory( + buf.address(), + jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer), + size + ); + + var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer)); + return written.intValue(); + } catch (Exception e) { + Log.error("When writing " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int create(String path, long mode, FuseFileInfo fi) { + try { + var ret = fileService.create(path, mode); + if (ret.isEmpty()) return -ErrorCodes.ENOSPC(); + else return 0; + } catch (Exception e) { + Log.error("When creating " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int mkdir(String path, long mode) { + try { + fileService.mkdir(path, mode); + return 0; + } catch (AlreadyExistsException aex) { + return -ErrorCodes.EEXIST(); + } catch (Exception e) { + Log.error("When creating dir " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int rmdir(String path) { + try { + fileService.unlink(path); + return 0; + } catch (DirectoryNotEmptyException ex) { + return -ErrorCodes.ENOTEMPTY(); + } catch (Exception e) { + Log.error("When removing dir " + path, e); + return 
-ErrorCodes.EIO(); + } + } + + @Override + public int rename(String path, String newName) { + try { + var ret = fileService.rename(path, newName); + if (!ret) return -ErrorCodes.ENOENT(); + else return 0; + } catch (Exception e) { + Log.error("When renaming " + path, e); + return -ErrorCodes.EIO(); + } + + } + + @Override + public int unlink(String path) { + try { + fileService.unlink(path); + return 0; + } catch (Exception e) { + Log.error("When unlinking " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int truncate(String path, long size) { + if (size < 0) return -ErrorCodes.EINVAL(); + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var file = fileOpt.get(); + var ok = fileService.truncate(file, size); + if (ok) + return 0; + else + return -ErrorCodes.ENOSPC(); + } catch (Exception e) { + Log.error("When truncating " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int chmod(String path, long mode) { + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var ret = fileService.chmod(fileOpt.get(), mode); + if (ret) return 0; + else return -ErrorCodes.EINVAL(); + } catch (Exception e) { + Log.error("When chmod " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int readdir(String path, Pointer buf, FuseFillDir filler, long offset, FuseFileInfo fi) { + try { + Iterable found; + try { + found = fileService.readDir(path); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode().equals(Status.NOT_FOUND.getCode())) + return -ErrorCodes.ENOENT(); + else throw e; + } + + filler.apply(buf, ".", null, 0); + filler.apply(buf, "..", null, 0); + + for (var c : found) { + filler.apply(buf, c, null, 0); + } + + return 0; + } catch (Exception e) { + Log.error("When readdir " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int readlink(String path, Pointer buf, long size) { + if (size < 0) return -ErrorCodes.EINVAL(); + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var file = fileOpt.get(); + var read = fileService.readlinkBS(fileOpt.get()); + if (read.isEmpty()) return 0; + UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size)); + buf.putByte(Math.min(size - 1, read.size()), (byte) 0); + return 0; + } catch (Exception e) { + Log.error("When reading " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int chown(String path, long uid, long gid) { + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + return 0; + } catch (Exception e) { + Log.error("When chown " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int symlink(String oldpath, String newpath) { + try { + var ret = fileService.symlink(oldpath, newpath); + if (ret == null) return -ErrorCodes.EEXIST(); + else return 0; + } catch (Exception e) { + Log.error("When creating " + newpath, e); + return -ErrorCodes.EIO(); + } + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java new file mode 100644 index 00000000..d2790516 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java @@ -0,0 +1,64 @@ +package com.usatiuk.dhfs.fuse; + +import com.google.protobuf.ByteOutput; +import jnr.ffi.Pointer; 
+ +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; + +public class JnrPtrByteOutput extends ByteOutput { + private final Pointer _backing; + private final long _size; + private final JnrPtrByteOutputAccessors _accessors; + private long _pos; + + public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) { + _backing = backing; + _size = size; + _pos = 0; + _accessors = accessors; + } + + @Override + public void write(byte value) { + throw new UnsupportedOperationException(); + } + + @Override + public void write(byte[] value, int offset, int length) { + if (length + _pos > _size) throw new IndexOutOfBoundsException(); + _backing.put(_pos, value, offset, length); + _pos += length; + } + + @Override + public void writeLazy(byte[] value, int offset, int length) { + if (length + _pos > _size) throw new IndexOutOfBoundsException(); + _backing.put(_pos, value, offset, length); + _pos += length; + } + + @Override + public void write(ByteBuffer value) { + var rem = value.remaining(); + if (rem + _pos > _size) throw new IndexOutOfBoundsException(); + + if (value.isDirect()) { + if (value instanceof MappedByteBuffer mb) { + mb.load(); + } + long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position(); + var out = _backing.address() + _pos; + _accessors.getUnsafe().copyMemory(addr, out, rem); + } else { + throw new UnsupportedOperationException(); + } + + _pos += rem; + } + + @Override + public void writeLazy(ByteBuffer value) { + write(value); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java new file mode 100644 index 00000000..78cc8ff4 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java @@ -0,0 +1,24 @@ +package com.usatiuk.dhfs.fuse; + +import jakarta.inject.Singleton; +import jdk.internal.access.JavaNioAccess; +import jdk.internal.access.SharedSecrets; +import lombok.Getter; +import sun.misc.Unsafe; + +import java.lang.reflect.Field; + +@Singleton +class JnrPtrByteOutputAccessors { + @Getter + JavaNioAccess _nioAccess; + @Getter + Unsafe _unsafe; + + JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException { + _nioAccess = SharedSecrets.getJavaNioAccess(); + Field f = Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + _unsafe = (Unsafe) f.get(null); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java new file mode 100644 index 00000000..2743bf48 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -0,0 +1,566 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree; + +import com.usatiuk.dhfs.files.objects.File; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.*; +import com.usatiuk.dhfs.objects.jrepository.*; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.objects.repository.opsupport.Op; +import com.usatiuk.dhfs.objects.repository.opsupport.OpObject; +import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; +import com.usatiuk.dhfs.objects.repository.opsupport.OpSender; +import com.usatiuk.kleppmanntree.*; +import com.usatiuk.dhfs.utils.VoidFn; +import 
io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; + +@ApplicationScoped +public class JKleppmannTreeManager { + private static final String dataFileName = "trees"; + private final ConcurrentHashMap _trees = new ConcurrentHashMap<>(); + @Inject + JKleppmannTreePeerInterface jKleppmannTreePeerInterface; + @Inject + OpSender opSender; + @Inject + OpObjectRegistry opObjectRegistry; + @Inject + JObjectManager jObjectManager; + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + JObjectTxManager jObjectTxManager; + @Inject + SoftJObjectFactory softJObjectFactory; + @Inject + JKleppmannTreePeerInterface peerInterface; + + public JKleppmannTree getTree(String name) { + return _trees.computeIfAbsent(name, this::createTree); + } + + private JKleppmannTree createTree(String name) { + return jObjectTxManager.executeTx(() -> { + var data = jObjectManager.get(JKleppmannTreePersistentData.nameFromTreeName(name)).orElse(null); + if (data == null) { + data = jObjectManager.put(new JKleppmannTreePersistentData(name), Optional.empty()); + } + var tree = new JKleppmannTree(name); + opObjectRegistry.registerObject(tree); + return tree; + }); + } + + public class JKleppmannTree implements OpObject { + private final KleppmannTree _tree; + + private final SoftJObject _persistentData; + + private final JKleppmannTreeStorageInterface _storageInterface; + private final JKleppmannTreeClock _clock; + + private final String _treeName; + + JKleppmannTree(String treeName) { + _treeName = treeName; + + _persistentData = softJObjectFactory.create(JKleppmannTreePersistentData.class, JKleppmannTreePersistentData.nameFromTreeName(treeName)); + + _storageInterface = new JKleppmannTreeStorageInterface(); + _clock = new JKleppmannTreeClock(); + + _tree = new KleppmannTree<>(_storageInterface, peerInterface, _clock, new JOpRecorder()); + } + + public String traverse(List names) { + return _tree.traverse(names); + } + + public String getNewNodeId() { + return _storageInterface.getNewNodeId(); + } + + public void move(String newParent, JKleppmannTreeNodeMeta newMeta, String node) { + _tree.move(newParent, newMeta, node); + } + + public void trash(JKleppmannTreeNodeMeta newMeta, String node) { + _tree.move(_storageInterface.getTrashId(), newMeta.withName(node), node); + } + + @Override + public boolean hasPendingOpsForHost(UUID host) { + return _persistentData.get() + .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, + (m, d) -> d.getQueues().containsKey(host) && + !d.getQueues().get(host).isEmpty() + ); + } + + @Override + public List getPendingOpsForHost(UUID host, int limit) { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + if (d.getQueues().containsKey(host)) { + var queue = d.getQueues().get(host); + ArrayList collected = new ArrayList<>(); + + for (var node : queue.entrySet()) { + collected.add(new JKleppmannTreeOpWrapper(node.getValue())); + if (collected.size() >= limit) break; + } + + return collected; + } + return List.of(); + }); + } + + @Override + public String getId() { + return _treeName; + } + + @Override + public void commitOpForHost(UUID host, Op op) { + if (!(op instanceof JKleppmannTreeOpWrapper jop)) + throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + 
getId()); + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + + var got = _persistentData.get().getData().getQueues().get(host).firstEntry().getValue(); + if (!Objects.equals(jop.getOp(), got)) + throw new IllegalArgumentException("Committed op push was not the oldest"); + + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.getQueues().get(host).pollFirstEntry(); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getQueues().get(host).put(jop.getOp().timestamp(), jop.getOp()); + } + }); + + } + + @Override + public void pushBootstrap(UUID host) { + _tree.recordBoostrapFor(host); + } + + public Pair findParent(Function predicate) { + return _tree.findParent(predicate); + } + + @Override + public boolean acceptExternalOp(UUID from, Op op) { + if (op instanceof JKleppmannTreePeriodicPushOp pushOp) { + return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp()); + } + + if (!(op instanceof JKleppmannTreeOpWrapper jop)) + throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId()); + + JObject fileRef; + if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { + var fino = f.getFileIno(); + fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId())); + } else { + fileRef = null; + } + + if (Log.isTraceEnabled()) + Log.trace("Received op from " + from + ": " + jop.getOp().timestamp().timestamp() + " " + jop.getOp().childId() + "->" + jop.getOp().newParentId() + " as " + jop.getOp().newMeta().getName()); + + try { + _tree.applyExternalOp(from, jop.getOp()); + } catch (Exception e) { + Log.error("Error applying external op", e); + throw e; + } finally { + // FIXME: + // Fixup the ref if it didn't really get applied + + if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile)) + Log.error("Could not create child of pushed op: " + jop.getOp()); + + if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { + if (fileRef != null) { + var got = jObjectManager.get(jop.getOp().childId()).orElse(null); + + VoidFn remove = () -> { + fileRef.runWriteLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { + m.removeRef(jop.getOp().childId()); + }); + }; + + if (got == null) { + remove.apply(); + } else { + try { + got.rLock(); + try { + got.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + if (got.getData() == null || !got.getData().extractRefs().contains(f.getFileIno())) + remove.apply(); + } finally { + got.rUnlock(); + } + } catch (DeletedObjectAccessException dex) { + remove.apply(); + } + } + } + } + } + return true; + } + + @Override + public Op getPeriodicPushOp() { + return new JKleppmannTreePeriodicPushOp(persistentPeerDataService.getSelfUuid(), _clock.peekTimestamp()); + } + + @Override + public void addToTx() { + // FIXME: a hack + _persistentData.get().rwLockNoCopy(); + _persistentData.get().rwUnlock(); + } + + private class JOpRecorder implements OpRecorder { + @Override + public void recordOp(OpMove op) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + var hostUuds = persistentPeerDataService.getHostUuids().stream().toList(); + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData 
object) { + object.recordOp(hostUuds, op); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.removeOp(hostUuds, op); + } + }); + opSender.push(JKleppmannTree.this); + } + + @Override + public void recordOpForPeer(UUID peer, OpMove op) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.recordOp(peer, op); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.removeOp(peer, op); + } + }); + opSender.push(JKleppmannTree.this); + } + } + + private class JKleppmannTreeClock implements Clock { + @Override + public Long getTimestamp() { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + var ret = _persistentData.get().getData().getClock().peekTimestamp() + 1; + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.getClock().getTimestamp(); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getClock().ungetTimestamp(); + } + }); + return ret; + } + + @Override + public Long peekTimestamp() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getClock().peekTimestamp()); + } + + @Override + public Long updateTimestamp(Long receivedTimestamp) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + _persistentData.get().mutate(new JMutator() { + Long _old; + + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + _old = object.getClock().updateTimestamp(receivedTimestamp); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getClock().setTimestamp(_old); + } + }); + return _persistentData.get().getData().getClock().peekTimestamp(); + } + } + + public class JKleppmannTreeStorageInterface implements StorageInterface { + private final LogWrapper _logWrapper = new LogWrapper(); + private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper(); + + public JKleppmannTreeStorageInterface() { + if (jObjectManager.get(getRootId()).isEmpty()) { + putNode(new JKleppmannTreeNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory("")))); + putNode(new JKleppmannTreeNode(new TreeNode<>(getTrashId(), null, null))); + } + } + + public JObject putNode(JKleppmannTreeNode node) { + return jObjectManager.put(node, Optional.ofNullable(node.getNode().getParent())); + } + + public JObject putNodeLocked(JKleppmannTreeNode node) { + return jObjectManager.putLocked(node, Optional.ofNullable(node.getNode().getParent())); + } + + @Override + public String getRootId() { + return _treeName + "_jt_root"; + } + + @Override + public String getTrashId() { + return _treeName + "_jt_trash"; + } + + @Override + public String getNewNodeId() { + return persistentPeerDataService.getUniqueId(); + } + + @Override + public JKleppmannTreeNodeWrapper getById(String id) { + var got = jObjectManager.get(id); + if (got.isEmpty()) return null; + return new JKleppmannTreeNodeWrapper((JObject) got.get()); + } + + @Override + public JKleppmannTreeNodeWrapper createNewNode(TreeNode node) { + return new 
JKleppmannTreeNodeWrapper(putNodeLocked(new JKleppmannTreeNode(node))); + } + + @Override + public void removeNode(String id) {} + + @Override + public LogInterface getLog() { + return _logWrapper; + } + + @Override + public PeerTimestampLogInterface getPeerTimestampLog() { + return _peerLogWrapper; + } + + @Override + public void rLock() { + _persistentData.get().rLock(); + } + + @Override + public void rUnlock() { + _persistentData.get().rUnlock(); + } + + @Override + public void rwLock() { + _persistentData.get().rwLockNoCopy(); + } + + @Override + public void rwUnlock() { + _persistentData.get().rwUnlock(); + } + + @Override + public void assertRwLock() { + _persistentData.get().assertRwLock(); + } + + private class PeerLogWrapper implements PeerTimestampLogInterface { + + @Override + public Long getForPeer(UUID peerId) { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, + (m, d) -> d.getPeerTimestampLog().get(peerId)); + } + + @Override + public void putForPeer(UUID peerId, Long timestamp) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + _persistentData.get().mutate(new JMutator() { + Long old; + + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + old = object.getPeerTimestampLog().put(peerId, timestamp); + return !Objects.equals(old, timestamp); + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + if (old != null) + object.getPeerTimestampLog().put(peerId, old); + else + object.getPeerTimestampLog().remove(peerId, timestamp); + } + }); + } + } + + private class LogWrapper implements LogInterface { + @Override + public Pair, LogRecord> peekOldest() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + var ret = d.getLog().firstEntry(); + if (ret == null) return null; + return Pair.of(ret); + }); + } + + @Override + public Pair, LogRecord> takeOldest() { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + + var ret = _persistentData.get().getData().getLog().firstEntry(); + if (ret != null) + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.getLog().pollFirstEntry(); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getLog().put(ret.getKey(), ret.getValue()); + } + }); + return Pair.of(ret); + } + + @Override + public Pair, LogRecord> peekNewest() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + var ret = d.getLog().lastEntry(); + if (ret == null) return null; + return Pair.of(ret); + }); + } + + @Override + public List, LogRecord>> newestSlice(CombinedTimestamp since, boolean inclusive) { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + var tail = d.getLog().tailMap(since, inclusive); + return tail.entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); + }); + } + + @Override + public List, LogRecord>> getAll() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + return d.getLog().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); + }); + } + + @Override + public boolean isEmpty() { + return 
_persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + return d.getLog().isEmpty(); + }); + } + + @Override + public boolean containsKey(CombinedTimestamp timestamp) { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + return d.getLog().containsKey(timestamp); + }); + } + + @Override + public long size() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + return (long) d.getLog().size(); + }); + } + + @Override + public void put(CombinedTimestamp timestamp, LogRecord record) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + if (_persistentData.get().getData().getLog().containsKey(timestamp)) + throw new IllegalStateException("Overwriting log entry?"); + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.getLog().put(timestamp, record); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getLog().remove(timestamp, record); + } + }); + } + + @Override + public void replace(CombinedTimestamp timestamp, LogRecord record) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + _persistentData.get().mutate(new JMutator() { + LogRecord old; + + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + old = object.getLog().put(timestamp, record); + return !Objects.equals(old, record); + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + if (old != null) + object.getLog().put(timestamp, old); + else + object.getLog().remove(timestamp, record); + } + }); + } + } + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java new file mode 100644 index 00000000..4612f8fc --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java @@ -0,0 +1,30 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree; + +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; +import com.usatiuk.dhfs.objects.repository.opsupport.Op; +import com.usatiuk.kleppmanntree.OpMove; +import lombok.Getter; + +import java.util.Collection; +import java.util.List; +import java.util.UUID; + +// Wrapper to avoid having to specify generic types +public class JKleppmannTreeOpWrapper implements Op { + @Getter + private final OpMove _op; + + public JKleppmannTreeOpWrapper(OpMove op) { + if (op == null) throw new IllegalArgumentException("op shouldn't be null"); + _op = op; + } + + @Override + public Collection getEscapedRefs() { + if (_op.newMeta() instanceof 
JKleppmannTreeNodeMetaFile mf) { + return List.of(mf.getFileIno()); + } + return List.of(); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java new file mode 100644 index 00000000..39b5d484 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java @@ -0,0 +1,25 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree; + +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.kleppmanntree.PeerInterface; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; + +import java.util.Collection; +import java.util.UUID; + +@Singleton +public class JKleppmannTreePeerInterface implements PeerInterface { + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Override + public UUID getSelfId() { + return persistentPeerDataService.getSelfUuid(); + } + + @Override + public Collection getAllPeers() { + return persistentPeerDataService.getHostUuidsAndSelf(); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java new file mode 100644 index 00000000..3c84d067 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java @@ -0,0 +1,25 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree; + +import com.usatiuk.dhfs.objects.repository.opsupport.Op; +import lombok.Getter; + +import java.util.Collection; +import java.util.List; +import java.util.UUID; + +public class JKleppmannTreePeriodicPushOp implements Op { + @Getter + private final UUID _from; + @Getter + private final long _timestamp; + + public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) { + _from = from; + _timestamp = timestamp; + } + + @Override + public Collection getEscapedRefs() { + return List.of(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java 
similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java new file mode 100644 index 00000000..0146da88 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -0,0 +1,45 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.jrepository.OnlyLocal; +import com.usatiuk.dhfs.objects.repository.ConflictResolver; +import com.usatiuk.kleppmanntree.TreeNode; +import lombok.Getter; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +// FIXME: Ideally this is two classes? 
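+// (Presumably one class for the CRDT tree-node payload and another for its JObjectData +// wrapper; as written, a single class both wraps the kleppmanntree TreeNode and serves +// as the locally stored object.)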
+@OnlyLocal +public class JKleppmannTreeNode extends JObjectData { + @Getter + final TreeNode _node; + + public JKleppmannTreeNode(TreeNode node) { + _node = node; + } + + @Override + public String getName() { + return _node.getId(); + } + + @Override + public Class getConflictResolver() { + return null; + } + + @Override + public Collection extractRefs() { + if (_node.getMeta() instanceof JKleppmannTreeNodeMetaFile) + return List.of(((JKleppmannTreeNodeMetaFile) _node.getMeta()).getFileIno()); + return Collections.unmodifiableCollection(_node.getChildren().values()); + } + + @Override + public Class getRefType() { + return JObjectData.class; + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java new file mode 100644 index 00000000..2ea7d27f --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java @@ -0,0 +1,31 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP; +import com.usatiuk.kleppmanntree.NodeMeta; +import lombok.Getter; + +import java.util.Objects; + +@ProtoMirror(JKleppmannTreeNodeMetaP.class) +public abstract class JKleppmannTreeNodeMeta implements NodeMeta { + @Getter + private final String _name; + + public JKleppmannTreeNodeMeta(String name) {_name = name;} + + public abstract JKleppmannTreeNodeMeta withName(String name); + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + JKleppmannTreeNodeMeta that = (JKleppmannTreeNodeMeta) o; + return Objects.equals(_name, that._name); + } + + @Override + public int hashCode() { + return Objects.hashCode(_name); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java new file mode 100644 index 00000000..79882017 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java @@ -0,0 +1,16 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaDirectoryP; + +@ProtoMirror(JKleppmannTreeNodeMetaDirectoryP.class) +public class JKleppmannTreeNodeMetaDirectory extends JKleppmannTreeNodeMeta { + public JKleppmannTreeNodeMetaDirectory(String name) { + super(name); + } + + @Override + public JKleppmannTreeNodeMeta withName(String name) { + return new JKleppmannTreeNodeMetaDirectory(name); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java new file mode 100644 index 00000000..124cd51d --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java @@ -0,0 +1,37 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import com.usatiuk.autoprotomap.runtime.ProtoMirror; 
+import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP; +import lombok.Getter; + +import java.util.Objects; + +@ProtoMirror(JKleppmannTreeNodeMetaFileP.class) +public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta { + @Getter + private final String _fileIno; + + public JKleppmannTreeNodeMetaFile(String name, String fileIno) { + super(name); + _fileIno = fileIno; + } + + @Override + public JKleppmannTreeNodeMeta withName(String name) { + return new JKleppmannTreeNodeMetaFile(name, _fileIno); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + JKleppmannTreeNodeMetaFile that = (JKleppmannTreeNodeMetaFile) o; + return Objects.equals(_fileIno, that._fileIno); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), _fileIno); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java new file mode 100644 index 00000000..d6881d5b --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -0,0 +1,88 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.jrepository.OnlyLocal; +import com.usatiuk.dhfs.objects.repository.ConflictResolver; +import com.usatiuk.kleppmanntree.AtomicClock; +import com.usatiuk.kleppmanntree.CombinedTimestamp; +import com.usatiuk.kleppmanntree.LogRecord; +import com.usatiuk.kleppmanntree.OpMove; +import lombok.Getter; + +import java.util.*; + +@OnlyLocal +public class JKleppmannTreePersistentData extends JObjectData { + private final String _treeName; + @Getter + private final AtomicClock _clock; + @Getter + private final HashMap, OpMove>> _queues; + @Getter + private final HashMap _peerTimestampLog; + @Getter + private final TreeMap, LogRecord> _log; + + public JKleppmannTreePersistentData(String treeName, AtomicClock clock, + HashMap, OpMove>> queues, + HashMap peerTimestampLog, TreeMap, LogRecord> log) { + _treeName = treeName; + _clock = clock; + _queues = queues; + _peerTimestampLog = peerTimestampLog; + _log = log; + } + + public JKleppmannTreePersistentData(String treeName) { + _treeName = treeName; + _clock = new AtomicClock(1); + _queues = new HashMap<>(); + _peerTimestampLog = new HashMap<>(); + _log = new TreeMap<>(); + } + + public static String nameFromTreeName(String treeName) { + return treeName + "_pd"; + } + + public void recordOp(UUID host, OpMove opMove) { + _queues.computeIfAbsent(host, h -> new TreeMap<>()); + _queues.get(host).put(opMove.timestamp(), opMove); + } + + public void removeOp(UUID host, OpMove opMove) { + _queues.get(host).remove(opMove.timestamp(), opMove); + } + + public void recordOp(Collection hosts, OpMove opMove) { + for (var u : hosts) { + recordOp(u, opMove); + } + } + + public void removeOp(Collection hosts, OpMove opMove) { + for (var u : hosts) { + removeOp(u, opMove); + } + } + + + @Override + public String getName() { + return nameFromTreeName(_treeName); + } + + public String getTreeName() { + return _treeName; + } + + @Override + public Class getConflictResolver() { + return null; + } + + @Override + public Collection extractRefs() { + return List.of(); + 
} +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java similarity index 98% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java index b21a9ece..1d0a9ca0 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.jrepository; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; public abstract class JObject { public abstract ObjectMetadata getMeta(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java 
b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java similarity index 98% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java index 5c7ac28f..377c9533 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.jrepository; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; import jakarta.annotation.Nullable; import java.util.Collection; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java similarity index 99% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java index 5a24c1e7..5cd3e2ce 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java @@ -7,7 +7,7 @@ import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.logging.Log; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java similarity index 99% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java index 66914cca..5de25357 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.jrepository; import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; import com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor; -import com.usatiuk.utils.HashSetDelayedBlockingQueue; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java similarity index 99% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java index 3d8282aa..3634f3a2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java @@ -4,7 +4,7 @@ import com.usatiuk.autoprotomap.runtime.ProtoSerializer; import com.usatiuk.dhfs.objects.persistence.JObjectDataP; import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; import io.quarkus.logging.Log; import jakarta.annotation.Nullable; import jakarta.enterprise.context.ApplicationScoped; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java 
rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java similarity index 91% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java index 14c6146f..70a4e60e 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.jrepository; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; public interface TxWriteback { TxBundle createBundle(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java similarity index 99% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java index db5e7119..ab1b1440 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.jrepository; import com.usatiuk.dhfs.objects.persistence.JObjectDataP; import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java new file mode 100644 index 00000000..fcb5a07e --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java @@ -0,0 +1,63 @@ +package com.usatiuk.dhfs.objects.repository; + +import org.apache.commons.codec.digest.DigestUtils; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.x500.X500Name; +import org.bouncycastle.asn1.x509.BasicConstraints; +import 
org.bouncycastle.cert.CertIOException; +import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; +import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.operator.ContentSigner; +import org.bouncycastle.operator.OperatorCreationException; +import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.math.BigInteger; +import java.security.*; +import java.security.cert.CertificateException; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.util.Calendar; +import java.util.Date; + +public class CertificateTools { + + public static X509Certificate certFromBytes(byte[] bytes) throws CertificateException { + CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); + InputStream in = new ByteArrayInputStream(bytes); + return (X509Certificate) certFactory.generateCertificate(in); + } + + public static KeyPair generateKeyPair() throws NoSuchAlgorithmException { + KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); + keyGen.initialize(2048); //FIXME: + return keyGen.generateKeyPair(); + } + + public static X509Certificate generateCertificate(KeyPair keyPair, String subject) throws CertificateException, CertIOException, NoSuchAlgorithmException, OperatorCreationException { + Provider bcProvider = new BouncyCastleProvider(); + Security.addProvider(bcProvider); + + Date startDate = new Date(); + + X500Name cnName = new X500Name("CN=" + subject); + BigInteger certSerialNumber = new BigInteger(DigestUtils.sha256(subject)); + + Calendar calendar = Calendar.getInstance(); + calendar.setTime(startDate); + calendar.add(Calendar.YEAR, 999); + + Date endDate = calendar.getTime(); + + ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA").build(keyPair.getPrivate()); + + JcaX509v3CertificateBuilder certBuilder = new JcaX509v3CertificateBuilder(cnName, certSerialNumber, startDate, endDate, cnName, keyPair.getPublic()); + + BasicConstraints basicConstraints = new BasicConstraints(false); + certBuilder.addExtension(new ASN1ObjectIdentifier("2.5.29.19"), true, basicConstraints); + + return new JcaX509CertificateConverter().setProvider(bcProvider).getCertificate(certBuilder.build(contentSigner)); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java new file mode 100644 index 00000000..29a53d88 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java @@ -0,0 +1,277 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.objects.repository.peersync.PeerSyncApiClientDynamic; +import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; +import com.usatiuk.dhfs.objects.repository.webapi.AvailablePeerInfo; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; 
+import io.quarkus.runtime.StartupEvent; +import io.quarkus.scheduler.Scheduled; +import io.smallrye.common.annotation.Blocking; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import lombok.Getter; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.security.cert.CertificateException; +import java.util.*; +import java.util.concurrent.*; + +@ApplicationScoped +public class PeerManager { + private final TransientPeersState _transientPeersState = new TransientPeersState(); + private final ConcurrentMap _seenButNotAdded = new ConcurrentHashMap<>(); + // FIXME: Ideally not call them on every ping + private final ArrayList _connectedListeners = new ArrayList<>(); + private final ArrayList _disconnectedListeners = new ArrayList<>(); + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + SyncHandler syncHandler; + @Inject + RpcClientFactory rpcClientFactory; + @Inject + PeerSyncApiClientDynamic peerSyncApiClient; + @ConfigProperty(name = "dhfs.objects.sync.ping.timeout") + long pingTimeout; + private ExecutorService _heartbeatExecutor; + @Getter + private boolean _ready = false; + + // Note: keep priority updated with below + void init(@Observes @Priority(600) StartupEvent event) throws IOException { + _heartbeatExecutor = Executors.newVirtualThreadPerTaskExecutor(); + + // Note: newly added hosts aren't in _transientPeersState + // but that's ok as they don't have initialSyncDone set + for (var h : persistentPeerDataService.getHostUuids()) + _transientPeersState.runWriteLocked(d -> d.get(h)); + + _ready = true; + } + + void shutdown(@Observes @Priority(50) ShutdownEvent event) throws IOException { + _ready = false; + } + + @Scheduled(every = "${dhfs.objects.reconnect_interval}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP) + @Blocking + public void tryConnectAll() { + if (!_ready) return; + try { + _heartbeatExecutor.invokeAll(persistentPeerDataService.getHostUuids() + .stream() + .>map(host -> () -> { + try { + if (isReachable(host)) + Log.trace("Heartbeat: " + host); + else + Log.debug("Trying to connect to " + host); + if (pingCheck(host)) + handleConnectionSuccess(host); + else + handleConnectionError(host); + } catch (Exception e) { + Log.error("Failed to connect to " + host, e); + } + return null; + }).toList(), 30, TimeUnit.SECONDS); //FIXME: + } catch (InterruptedException iex) { + Log.error("Heartbeat was interrupted"); + } + } + + // Note: registrations should be completed with Priority < 600 + public void registerConnectEventListener(ConnectionEventListener listener) { + if (_ready) throw new IllegalStateException("Already initialized"); + synchronized (_connectedListeners) { + _connectedListeners.add(listener); + } + } + + // Note: registrations should be completed with Priority < 600 + public void registerDisconnectEventListener(ConnectionEventListener listener) { + if (_ready) throw new IllegalStateException("Already initialized"); + synchronized (_disconnectedListeners) { + _disconnectedListeners.add(listener); + } + } + + public void handleConnectionSuccess(UUID host) { + if (!_ready) return; + + boolean wasReachable = isReachable(host); + + boolean shouldSyncObj = persistentPeerDataService.markInitialObjSyncDone(host); + boolean shouldSyncOp = persistentPeerDataService.markInitialOpSyncDone(host); + + if (shouldSyncObj) + syncHandler.pushInitialResyncObj(host); + if 
(shouldSyncOp) + syncHandler.pushInitialResyncOp(host); + + _transientPeersState.runWriteLocked(d -> { + d.get(host).setReachable(true); + return null; + }); + + if (wasReachable) return; + + Log.info("Connected to " + host); + + for (var l : _connectedListeners) { + l.apply(host); + } + } + + public void handleConnectionError(UUID host) { + boolean wasReachable = isReachable(host); + + if (wasReachable) + Log.info("Lost connection to " + host); + + _transientPeersState.runWriteLocked(d -> { + d.get(host).setReachable(false); + return null; + }); + + for (var l : _disconnectedListeners) { + l.apply(host); + } + } + + // FIXME: + private boolean pingCheck(UUID host) { + TransientPeerState state = _transientPeersState.runReadLocked(s -> s.getCopy(host)); + + try { + return rpcClientFactory.withObjSyncClient(host.toString(), state.getAddr(), state.getSecurePort(), pingTimeout, c -> { + var ret = c.ping(PingRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); + if (!UUID.fromString(ret.getSelfUuid()).equals(host)) { + throw new IllegalStateException("Ping selfUuid returned " + ret.getSelfUuid() + " but expected " + host); + } + return true; + }); + } catch (Exception ignored) { + Log.debug("Host " + host + " is unreachable: " + ignored.getMessage() + " " + ignored.getCause()); + return false; + } + } + + public boolean isReachable(UUID host) { + return _transientPeersState.runReadLocked(d -> d.get(host).isReachable()); + } + + public TransientPeerState getTransientState(UUID host) { + return _transientPeersState.runReadLocked(d -> d.getCopy(host)); + } + + public List getAvailableHosts() { + return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream() + .filter(e -> e.getValue().isReachable()) + .map(Map.Entry::getKey).toList()); + } + + public List getUnavailableHosts() { + return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream() + .filter(e -> !e.getValue().isReachable()) + .map(Map.Entry::getKey).toList()); + } + + public HostStateSnapshot getHostStateSnapshot() { + ArrayList available = new ArrayList<>(); + ArrayList unavailable = new ArrayList<>(); + _transientPeersState.runReadLocked(d -> { + for (var v : d.getStates().entrySet()) { + if (v.getValue().isReachable()) + available.add(v.getKey()); + else + unavailable.add(v.getKey()); + } + return null; + } + ); + return new HostStateSnapshot(available, unavailable); + } + + public void notifyAddr(UUID host, String addr, Integer port, Integer securePort) { + if (host.equals(persistentPeerDataService.getSelfUuid())) { + return; + } + + var state = new TransientPeerState(); + state.setAddr(addr); + state.setPort(port); + state.setSecurePort(securePort); + + if (!persistentPeerDataService.existsHost(host)) { + var prev = _seenButNotAdded.put(host, state); + // Needed for tests + if (prev == null) + Log.debug("Ignoring new address from unknown host " + host + ": addr=" + addr + " port=" + port); + return; + } else { + _seenButNotAdded.remove(host); + } + + _transientPeersState.runWriteLocked(d -> { +// Log.trace("Updating connection info for " + host + ": addr=" + addr + " port=" + port); + d.get(host).setAddr(addr); + d.get(host).setPort(port); + d.get(host).setSecurePort(securePort); + return null; + }); + } + + public void removeRemoteHost(UUID host) { + persistentPeerDataService.removeHost(host); + // Race?
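+ // (Likely concern: removeHost() above and the transient-state cleanup below are not atomic, + // so a concurrent heartbeat in tryConnectAll could observe the half-removed host and + // re-create its transient entry.)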
+ _transientPeersState.runWriteLocked(d -> { + d.getStates().remove(host); + return null; + }); + } + + public void addRemoteHost(UUID host) { + if (!_seenButNotAdded.containsKey(host)) { + throw new IllegalStateException("Host " + host + " is not seen"); + } + if (persistentPeerDataService.existsHost(host)) { + throw new IllegalStateException("Host " + host + " is already added"); + } + + var state = _seenButNotAdded.get(host); + + // FIXME: race? + + var info = peerSyncApiClient.getSelfInfo(state.getAddr(), state.getPort()); + + try { + persistentPeerDataService.addHost( + new PersistentPeerInfo(UUID.fromString(info.selfUuid()), + CertificateTools.certFromBytes(Base64.getDecoder().decode(info.cert())))); + Log.info("Added host: " + host.toString()); + } catch (CertificateException e) { + throw new RuntimeException(e); + } + } + + public Collection getSeenButNotAddedHosts() { + return _seenButNotAdded.entrySet().stream() + .filter(e -> !persistentPeerDataService.existsHost(e.getKey())) + .map(e -> new AvailablePeerInfo(e.getKey().toString(), e.getValue().getAddr(), e.getValue().getPort())) + .toList(); + } + + @FunctionalInterface + public interface ConnectionEventListener { + void apply(UUID host); + } + + public record HostStateSnapshot(List available, List unavailable) { + } + +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java new file mode 100644 index 00000000..0413d8b8 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ -0,0 +1,361 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.utils.SerializationHelper; +import com.usatiuk.dhfs.ShutdownChecker; +import com.usatiuk.dhfs.objects.jrepository.*; +import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectory; +import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectoryLocal; +import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; +import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Nullable; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.SerializationUtils; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.security.KeyPair; +import java.security.cert.X509Certificate; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.ExecutorService; + +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + +@ApplicationScoped +public class PersistentPeerDataService { + final String dataFileName = "hosts"; + @ConfigProperty(name = "dhfs.objects.root") + String dataRoot; + @Inject + PeerTrustManager peerTrustManager; + @Inject + JObjectManager jObjectManager; + @Inject + ExecutorService executorService; + @Inject + InvalidationQueueService invalidationQueueService; + @Inject + RpcClientFactory 
rpcClientFactory; + @Inject + ShutdownChecker shutdownChecker; + @Inject + JObjectTxManager jObjectTxManager; + @Inject + SoftJObjectFactory softJObjectFactory; + SoftJObject peerDirectory; + SoftJObject peerDirectoryLocal; + private PersistentRemoteHosts _persistentData = new PersistentRemoteHosts(); + private UUID _selfUuid; + + void init(@Observes @Priority(300) StartupEvent event) throws IOException { + Paths.get(dataRoot).toFile().mkdirs(); + Log.info("Initializing with root " + dataRoot); + if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) { + Log.info("Reading hosts"); + _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName))); + } else if (Paths.get(dataRoot).resolve(dataFileName + ".bak").toFile().exists()) { + Log.warn("Reading hosts from backup"); + _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName + ".bak"))); + } + _selfUuid = _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfUuid); + + if (_persistentData.runReadLocked(d -> d.getSelfCertificate() == null)) { + jObjectTxManager.executeTxAndFlush(() -> { + _persistentData.runWriteLocked(d -> { + try { + Log.info("Generating a key pair, please wait"); + d.setSelfKeyPair(CertificateTools.generateKeyPair()); + d.setSelfCertificate(CertificateTools.generateCertificate(d.getSelfKeyPair(), _selfUuid.toString())); + } catch (Exception e) { + throw new RuntimeException("Failed generating cert", e); + } + return null; + }); + var newpd = new PeerDirectory(); + jObjectManager.put(new PersistentPeerInfo(_selfUuid, getSelfCertificate()), Optional.of(PeerDirectory.PeerDirectoryObjName)); + newpd.getPeers().add(_selfUuid); + jObjectManager.put(newpd, Optional.empty()); + jObjectManager.put(new PeerDirectoryLocal(), Optional.empty()); + }); + } + + peerDirectory = softJObjectFactory.create(PeerDirectory.class, PeerDirectory.PeerDirectoryObjName); + peerDirectoryLocal = softJObjectFactory.create(PeerDirectoryLocal.class, PeerDirectoryLocal.PeerDirectoryLocalObjName); + + if (!shutdownChecker.lastShutdownClean()) { + _persistentData.getData().getIrregularShutdownCounter().addAndGet(1); + jObjectTxManager.executeTxAndFlush(() -> { + peerDirectoryLocal.get().rwLock(); + peerDirectoryLocal.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + try { + peerDirectoryLocal.get().getData().getInitialObjSyncDone().clear(); + peerDirectoryLocal.get().bumpVer(); + } finally { + peerDirectoryLocal.get().rwUnlock(); + } + }); + } + + jObjectManager.registerWriteListener(PersistentPeerInfo.class, this::pushPeerUpdates); + jObjectManager.registerWriteListener(PeerDirectory.class, this::pushPeerUpdates); + + // FIXME: Warn on failed resolves?
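+ // (With LOCAL_ONLY resolution, peers whose data cannot be resolved locally are presumably + // skipped silently when the trust manager is reloaded below, hence the FIXME above.)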
+ peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + peerTrustManager.reloadTrustManagerHosts(getHosts()); + return null; + }); + + Files.writeString(Paths.get(dataRoot, "self_uuid"), _selfUuid.toString()); + Log.info("Self uuid is: " + _selfUuid.toString()); + writeData(); + } + + void shutdown(@Observes @Priority(300) ShutdownEvent event) throws IOException { + Log.info("Saving hosts"); + writeData(); + Log.info("Shutdown"); + } + + private void writeData() { + try { + if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) + Files.move(Paths.get(dataRoot).resolve(dataFileName), Paths.get(dataRoot).resolve(dataFileName + ".bak"), REPLACE_EXISTING); + Files.write(Paths.get(dataRoot).resolve(dataFileName), SerializationUtils.serialize(_persistentData)); + } catch (IOException iex) { + Log.error("Error writing persistent hosts data", iex); + throw new RuntimeException(iex); + } + } + + private void pushPeerUpdates() { + pushPeerUpdates(null); + } + + private void pushPeerUpdates(@Nullable JObject obj) { + if (obj != null) + Log.info("Scheduling certificate update after " + obj.getMeta().getName() + " was updated"); + executorService.submit(() -> { + updateCerts(); + invalidationQueueService.pushInvalidationToAll(PeerDirectory.PeerDirectoryObjName); + for (var p : peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList())) + invalidationQueueService.pushInvalidationToAll(PersistentPeerInfo.getNameFromUuid(p)); + }); + } + + private JObject getPeer(UUID uuid) { + var got = jObjectManager.get(PersistentPeerInfo.getNameFromUuid(uuid)).orElseThrow(() -> new IllegalStateException("Peer " + uuid + " not found")); + got.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + if (d == null) throw new IllegalStateException("Could not resolve peer " + uuid); + if (!(d instanceof PersistentPeerInfo)) + throw new IllegalStateException("Peer " + uuid + " is of wrong type!"); + return null; + }); + return (JObject) got; + } + + private List getPeersSnapshot() { + return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, + (m, d) -> d.getPeers().stream().map(u -> { + try { + return getPeer(u).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m2, d2) -> d2); + } catch (Exception e) { + Log.warn("Error making snapshot of peer " + u, e); + return null; + } + }).filter(Objects::nonNull).toList()); + } + + public UUID getSelfUuid() { + if (_selfUuid == null) + throw new IllegalStateException(); + else return _selfUuid; + } + + public String getUniqueId() { + String sb = String.valueOf(_selfUuid) + + _persistentData.getData().getIrregularShutdownCounter() + + "_" + + _persistentData.getData().getSelfCounter().addAndGet(1); + return sb; + } + + public PersistentPeerInfo getInfo(UUID name) { + return getPeer(name).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d); + } + + public List getHosts() { + return getPeersSnapshot().stream().filter(i -> !i.getUuid().equals(_selfUuid)).toList(); + } + + public List getHostUuids() { + return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().filter(i -> !i.equals(_selfUuid)).toList()); + } + + public List getHostUuidsAndSelf() { + return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList()); + } + + public List getHostsNoNulls() { + for (int i = 0; i < 5; 
i++) { + try { + return peerDirectory.get() + .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, + (m, d) -> d.getPeers().stream() + .map(u -> getPeer(u).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m2, d2) -> d2)) + .filter(e -> !e.getUuid().equals(_selfUuid)).toList()); + } catch (Exception e) { + Log.warn("Error when making snapshot of hosts: " + e.getMessage()); + try { + Thread.sleep(i * 2); + } catch (InterruptedException ignored) { + } + } + } + throw new StatusRuntimeException(Status.ABORTED.withDescription("Could not make a snapshot of peers in 5 tries!")); + } + + public boolean addHost(PersistentPeerInfo persistentPeerInfo) { + return jObjectTxManager.executeTx(() -> { + if (persistentPeerInfo.getUuid().equals(_selfUuid)) return false; + + boolean added = peerDirectory.get().runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { + boolean addedInner = d.getPeers().add(persistentPeerInfo.getUuid()); + if (addedInner) { + jObjectManager.put(persistentPeerInfo, Optional.of(m.getName())); + b.apply(); + } + return addedInner; + }); + return added; + }); + } + + public boolean removeHost(UUID host) { + return jObjectTxManager.executeTx(() -> { + boolean removed = peerDirectory.get().runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { + boolean removedInner = d.getPeers().remove(host); + Log.info("Removing host: " + host + (removedInner ? " removed" : " did not exist")); + if (removedInner) { + peerDirectoryLocal.get().rwLock(); + peerDirectoryLocal.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + try { + peerDirectoryLocal.get().getData().getInitialObjSyncDone().remove(host); + peerDirectoryLocal.get().getData().getInitialOpSyncDone().remove(host); + peerDirectoryLocal.get().bumpVer(); + } finally { + peerDirectoryLocal.get().rwUnlock(); + } + getPeer(host).runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mp, dp, bp, vp) -> { + mp.removeRef(m.getName()); + return null; + }); + b.apply(); + } + return removedInner; + }); + return removed; + }); + } + + private void updateCerts() { + try { + peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + peerTrustManager.reloadTrustManagerHosts(getHostsNoNulls()); + // FIXME: I don't think this should be needed with a custom trust store, + // but it doesn't work without it?
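+ // (dropCache() below presumably forces new gRPC channels, and with them fresh TLS contexts, + // so that connections stop using certificates loaded before the trust manager reload above.)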
+ rpcClientFactory.dropCache(); + return null; + }); + } catch (Exception ex) { + Log.warn("Error when refreshing certificates, will retry: " + ex.getMessage()); + pushPeerUpdates(); + } + } + + public boolean existsHost(UUID uuid) { + return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().contains(uuid)); + } + + public PersistentPeerInfo getHost(UUID uuid) { + if (!existsHost(uuid)) + throw new StatusRuntimeException(Status.NOT_FOUND); + return getPeer(uuid).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d); + } + + public KeyPair getSelfKeypair() { + return _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfKeyPair); + } + + public X509Certificate getSelfCertificate() { + return _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfCertificate); + } + + // Returns true if host's initial sync wasn't done before, and marks it as done + public boolean markInitialOpSyncDone(UUID connectedHost) { + return jObjectTxManager.executeTx(() -> { + peerDirectoryLocal.get().rwLock(); + try { + peerDirectoryLocal.get().local(); + boolean contained = peerDirectoryLocal.get().getData().getInitialOpSyncDone().contains(connectedHost); + + if (!contained) + peerDirectoryLocal.get().local().mutate(new JMutator() { + @Override + public boolean mutate(PeerDirectoryLocal object) { + object.getInitialOpSyncDone().add(connectedHost); + return true; + } + + @Override + public void revert(PeerDirectoryLocal object) { + object.getInitialOpSyncDone().remove(connectedHost); + } + }); + return !contained; + } finally { + peerDirectoryLocal.get().rwUnlock(); + } + }); + } + + public boolean markInitialObjSyncDone(UUID connectedHost) { + return jObjectTxManager.executeTx(() -> { + peerDirectoryLocal.get().rwLock(); + try { + peerDirectoryLocal.get().local(); + boolean contained = peerDirectoryLocal.get().getData().getInitialObjSyncDone().contains(connectedHost); + + if (!contained) + peerDirectoryLocal.get().local().mutate(new JMutator() { + @Override + public boolean mutate(PeerDirectoryLocal object) { + object.getInitialObjSyncDone().add(connectedHost); + return true; + } + + @Override + public void revert(PeerDirectoryLocal object) { + object.getInitialObjSyncDone().remove(connectedHost); + } + }); + return !contained; + } finally { + peerDirectoryLocal.get().rwUnlock(); + } + }); + } + +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java new file mode 100644 index 00000000..a6b0c8f3 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java @@ -0,0 +1,29 @@ +package com.usatiuk.dhfs.objects.repository; + +import lombok.Getter; +import lombok.Setter; + +import java.io.Serial; +import java.io.Serializable; +import java.security.KeyPair; +import java.security.cert.X509Certificate; +import java.util.UUID; +import 
java.util.concurrent.atomic.AtomicLong; + +public class PersistentRemoteHostsData implements Serializable { + @Serial + private static final long serialVersionUID = 1L; + + @Getter + private final UUID _selfUuid = UUID.randomUUID(); + @Getter + private final AtomicLong _selfCounter = new AtomicLong(); + @Getter + private final AtomicLong _irregularShutdownCounter = new AtomicLong(); + @Getter + @Setter + private X509Certificate _selfCertificate = null; + @Getter + @Setter + private KeyPair _selfKeyPair = null; +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java new file mode 100644 index 00000000..a9a277c4 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java @@ -0,0 +1,174 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.google.common.collect.Maps; +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.jrepository.*; +import com.usatiuk.dhfs.objects.persistence.JObjectDataP; +import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +import com.usatiuk.dhfs.objects.repository.opsupport.Op; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; + +import javax.annotation.Nullable; +import java.util.*; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.Executors; +import java.util.stream.Collectors; + +@ApplicationScoped +public class RemoteObjectServiceClient { + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Inject + RpcClientFactory rpcClientFactory; + + @Inject + JObjectManager jObjectManager; + + @Inject + SyncHandler syncHandler; + @Inject + InvalidationQueueService invalidationQueueService; + @Inject + ProtoSerializer dataProtoSerializer; + @Inject + ProtoSerializer opProtoSerializer; + @Inject + JObjectTxManager jObjectTxManager; + + public Pair getSpecificObject(UUID host, String name) { + return rpcClientFactory.withObjSyncClient(host, client -> { + var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(name).build()); + return Pair.of(reply.getObject().getHeader(), reply.getObject().getContent()); + }); + } + + public JObjectDataP getObject(JObject jObject) { + jObject.assertRwLock(); + + var targets = jObject.runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d) -> { + var ourVersion = md.getOurVersion(); + if (ourVersion >= 1) + return md.getRemoteCopies().entrySet().stream() + .filter(entry -> entry.getValue().equals(ourVersion)) + .map(Map.Entry::getKey).toList(); + else + return persistentPeerDataService.getHostUuids(); + }); + + if (targets.isEmpty()) + throw new IllegalStateException("No targets for object " + jObject.getMeta().getName()); + + Log.info("Downloading object " + jObject.getMeta().getName() + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); + + return rpcClientFactory.withObjSyncClient(targets, client -> { + var reply = 
client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(jObject.getMeta().getName()).build()); + + var receivedMap = new HashMap(); + for (var e : reply.getObject().getHeader().getChangelog().getEntriesList()) { + receivedMap.put(UUID.fromString(e.getHost()), e.getVersion()); + } + + return jObjectTxManager.executeTx(() -> { + return jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d, b, v) -> { + var unexpected = !Objects.equals( + Maps.filterValues(md.getChangelog(), val -> val != 0), + Maps.filterValues(receivedMap, val -> val != 0)); + + if (unexpected) { + try { + syncHandler.handleOneUpdate(UUID.fromString(reply.getSelfUuid()), reply.getObject().getHeader()); + } catch (SyncHandler.OutdatedUpdateException ignored) { + Log.info("Outdated update of " + md.getName() + " from " + reply.getSelfUuid()); + invalidationQueueService.pushInvalidationToOne(UUID.fromString(reply.getSelfUuid()), md.getName()); // True? + throw new StatusRuntimeException(Status.ABORTED.withDescription("Received outdated object version")); + } catch (Exception e) { + Log.error("Received unexpected object version from " + reply.getSelfUuid() + + " for " + reply.getObject().getHeader().getName() + " and conflict resolution failed", e); + throw new StatusRuntimeException(Status.ABORTED.withDescription("Received unexpected object version")); + } + } + + return reply.getObject().getContent(); + }); + }); + }); + } + + @Nullable + public IndexUpdateReply notifyUpdate(JObject obj, UUID host) { + var builder = IndexUpdatePush.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); + + var header = obj + .runReadLocked( + obj.getMeta().getKnownClass().isAnnotationPresent(PushResolution.class) + ? 
JObjectManager.ResolutionStrategy.LOCAL_ONLY + : JObjectManager.ResolutionStrategy.NO_RESOLUTION, + (m, d) -> { + if (obj.getMeta().isDeleted()) return null; + if (m.getKnownClass().isAnnotationPresent(PushResolution.class) && d == null) + Log.warn("Object " + m.getName() + " is marked as PushResolution but no resolution found"); + if (m.getKnownClass().isAnnotationPresent(PushResolution.class)) + return m.toRpcHeader(dataProtoSerializer.serialize(d)); + else + return m.toRpcHeader(); + }); + if (header == null) return null; + jObjectTxManager.executeTx(obj::markSeen); + builder.setHeader(header); + + var send = builder.build(); + + return rpcClientFactory.withObjSyncClient(host, client -> client.indexUpdate(send)); + } + + public OpPushReply pushOps(List ops, String queueName, UUID host) { + for (Op op : ops) { + for (var ref : op.getEscapedRefs()) { + jObjectTxManager.executeTx(() -> { + jObjectManager.get(ref).ifPresent(JObject::markSeen); + }); + } + } + var builder = OpPushMsg.newBuilder() + .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) + .setQueueId(queueName); + for (var op : ops) + builder.addMsg(opProtoSerializer.serialize(op)); + return rpcClientFactory.withObjSyncClient(host, client -> client.opPush(builder.build())); + } + + public Collection canDelete(Collection targets, String object, Collection ourReferrers) { + ConcurrentLinkedDeque results = new ConcurrentLinkedDeque<>(); + Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); + try (var executor = Executors.newVirtualThreadPerTaskExecutor()) { + try { + executor.invokeAll(targets.stream().>map(h -> () -> { + try { + var req = CanDeleteRequest.newBuilder() + .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) + .setName(object); + req.addAllOurReferrers(ourReferrers); + var res = rpcClientFactory.withObjSyncClient(h, client -> client.canDelete(req.build())); + if (res != null) + results.add(res); + } catch (Exception e) { + Log.debug("Error when asking canDelete for object " + object, e); + } + return null; + }).toList()); + } catch (InterruptedException e) { + Log.warn("Interrupted waiting for canDelete for object " + object); + } + if (!executor.shutdownNow().isEmpty()) + Log.warn("Didn't ask all targets when asking canDelete for " + object); + } + return results; + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java new file mode 100644 index 00000000..17b9bb22 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java @@ -0,0 +1,184 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException; +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.jrepository.JObjectManager; +import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; +import com.usatiuk.dhfs.objects.persistence.JObjectDataP; +import com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor; +import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +import com.usatiuk.dhfs.objects.repository.opsupport.Op; +import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; +import 
com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.grpc.GrpcService; +import io.quarkus.logging.Log; +import io.smallrye.common.annotation.Blocking; +import io.smallrye.mutiny.Uni; +import jakarta.annotation.security.RolesAllowed; +import jakarta.inject.Inject; + +import java.util.UUID; + +// Note: RunOnVirtualThread hangs somehow +@GrpcService +@RolesAllowed("cluster-member") +public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { + @Inject + SyncHandler syncHandler; + + @Inject + JObjectManager jObjectManager; + + @Inject + PeerManager remoteHostManager; + + @Inject + AutoSyncProcessor autoSyncProcessor; + + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Inject + InvalidationQueueService invalidationQueueService; + + @Inject + ProtoSerializer dataProtoSerializer; + @Inject + ProtoSerializer opProtoSerializer; + + @Inject + OpObjectRegistry opObjectRegistry; + + @Inject + JObjectTxManager jObjectTxManager; + + @Override + @Blocking + public Uni getObject(GetObjectRequest request) { + if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) + throw new StatusRuntimeException(Status.UNAUTHENTICATED); + + Log.info("<-- getObject: " + request.getName() + " from " + request.getSelfUuid()); + + var obj = jObjectManager.get(request.getName()).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); + + // Does @Blocking break this? + return Uni.createFrom().emitter(emitter -> { + var replyObj = jObjectTxManager.executeTx(() -> { + // Obj.markSeen before markSeen of its children + obj.markSeen(); + return obj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (meta, data) -> { + if (meta.isOnlyLocal()) + throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Trying to get local-only object")); + if (data == null) { + Log.info("<-- getObject FAIL: " + request.getName() + " from " + request.getSelfUuid()); + throw new StatusRuntimeException(Status.ABORTED.withDescription("Not available locally")); + } + data.extractRefs().forEach(ref -> + jObjectManager.get(ref) + .orElseThrow(() -> new IllegalStateException("Non-hydrated refs for local object?")) + .markSeen()); + + return ApiObject.newBuilder() + .setHeader(obj.getMeta().toRpcHeader()) + .setContent(dataProtoSerializer.serialize(obj.getData())).build(); + }); + }); + var ret = GetObjectReply.newBuilder() + .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) + .setObject(replyObj).build(); + // TODO: Could this cause problems if we wait for too long? 
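+ // (Assumption based on the name: commitFenceAsync runs its callback once the transaction that
+ // ran markSeen above is durable, so the reply is only sent after that state has been persisted.)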
+ obj.commitFenceAsync(() -> emitter.complete(ret)); + }); + } + + @Override + @Blocking + public Uni canDelete(CanDeleteRequest request) { + if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) + throw new StatusRuntimeException(Status.UNAUTHENTICATED); + + Log.info("<-- canDelete: " + request.getName() + " from " + request.getSelfUuid()); + + var builder = CanDeleteReply.newBuilder(); + + var obj = jObjectManager.get(request.getName()); + + builder.setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); + builder.setObjName(request.getName()); + + if (obj.isPresent()) try { + boolean tryUpdate = obj.get().runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d) -> { + if (m.isDeleted() && !m.isDeletionCandidate()) + throw new IllegalStateException("Object " + m.getName() + " is deleted but not a deletion candidate"); + builder.setDeletionCandidate(m.isDeletionCandidate()); + builder.addAllReferrers(m.getReferrers()); + return m.isDeletionCandidate() && !m.isDeleted(); + }); + // FIXME +// if (tryUpdate) { +// obj.get().runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { +// return null; +// }); +// } + } catch (DeletedObjectAccessException dox) { + builder.setDeletionCandidate(true); + } + else { + builder.setDeletionCandidate(true); + } + + var ret = builder.build(); + + if (!ret.getDeletionCandidate()) + for (var rr : request.getOurReferrersList()) + autoSyncProcessor.add(rr); + + return Uni.createFrom().item(ret); + } + + @Override + @Blocking + public Uni indexUpdate(IndexUpdatePush request) { + if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) + throw new StatusRuntimeException(Status.UNAUTHENTICATED); + +// Log.info("<-- indexUpdate: " + request.getHeader().getName()); + return jObjectTxManager.executeTxAndFlush(() -> { + return Uni.createFrom().item(syncHandler.handleRemoteUpdate(request)); + }); + } + + @Override + @Blocking + public Uni opPush(OpPushMsg request) { + if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) + throw new StatusRuntimeException(Status.UNAUTHENTICATED); + + try { + var objs = request.getMsgList().stream().map(opProtoSerializer::deserialize).toList(); + jObjectTxManager.executeTxAndFlush(() -> { + opObjectRegistry.acceptExternalOps(request.getQueueId(), UUID.fromString(request.getSelfUuid()), objs); + }); + } catch (Exception e) { + Log.error(e, e); + throw e; + } + return Uni.createFrom().item(OpPushReply.getDefaultInstance()); + } + + @Override + @Blocking + public Uni ping(PingRequest request) { + if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + + return Uni.createFrom().item(PingReply.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java new file mode 100644 index 00000000..3239ec7d --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java @@ -0,0 +1,69 @@ +package 
com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager; +import io.grpc.ChannelCredentials; +import io.grpc.ManagedChannel; +import io.grpc.TlsChannelCredentials; +import io.grpc.netty.NettyChannelBuilder; +import io.quarkus.runtime.ShutdownEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; + +import javax.net.ssl.KeyManagerFactory; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + +//FIXME: Leaks! +@ApplicationScoped +public class RpcChannelFactory { + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + PeerTrustManager peerTrustManager; + private ConcurrentMap _secureChannelCache = new ConcurrentHashMap<>(); + + void shutdown(@Observes @Priority(100000) ShutdownEvent event) { + for (var c : _secureChannelCache.values()) c.shutdownNow(); + } + + private ChannelCredentials getChannelCredentials() { + try { + KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); + ks.load(null, null); + + ks.setKeyEntry("clientkey", persistentPeerDataService.getSelfKeypair().getPrivate(), null, new Certificate[]{persistentPeerDataService.getSelfCertificate()}); + + KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + keyManagerFactory.init(ks, null); + + ChannelCredentials creds = TlsChannelCredentials.newBuilder().trustManager(peerTrustManager).keyManager(keyManagerFactory.getKeyManagers()).build(); + return creds; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + ManagedChannel getSecureChannel(String host, String address, int port) { + var key = new SecureChannelKey(host, address, port); + return _secureChannelCache.computeIfAbsent(key, (k) -> { + return NettyChannelBuilder.forAddress(address, port, getChannelCredentials()).overrideAuthority(host).idleTimeout(10, TimeUnit.SECONDS).build(); + }); + } + + public void dropCache() { + var oldS = _secureChannelCache; + _secureChannelCache = new ConcurrentHashMap<>(); + oldS.values().forEach(ManagedChannel::shutdown); + } + + private record SecureChannelKey(String host, String address, int port) { + } + + private record InsecureChannelKey(String address, int port) { + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java new file mode 100644 index 00000000..aff24f85 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java @@ -0,0 +1,88 @@ +package com.usatiuk.dhfs.objects.repository; + +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + +// TODO: Dedup this +@ApplicationScoped +public class RpcClientFactory { + @ConfigProperty(name = "dhfs.objects.sync.timeout") + long syncTimeout; + 
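+ // Seconds; applied per call as a gRPC deadline via withDeadlineAfter in withObjSyncClient below.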
+ @Inject + PeerManager remoteHostManager; + + @Inject + RpcChannelFactory rpcChannelFactory; + // FIXME: Leaks! + private ConcurrentMap _objSyncCache = new ConcurrentHashMap<>(); + + public R withObjSyncClient(Collection targets, ObjectSyncClientFunction fn) { + var shuffledList = new ArrayList<>(targets); + Collections.shuffle(shuffledList); + for (UUID target : shuffledList) { + try { + return withObjSyncClient(target, fn); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode().equals(Status.UNAVAILABLE.getCode())) + Log.debug("Host " + target + " is unreachable: " + e.getMessage()); + else + Log.warn("When calling " + target + " " + e.getMessage()); + } catch (Exception e) { + Log.warn("When calling " + target + " " + e.getMessage()); + } + } + throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("No reachable targets!")); + } + + public R withObjSyncClient(UUID target, ObjectSyncClientFunction fn) { + var hostinfo = remoteHostManager.getTransientState(target); + boolean reachable = remoteHostManager.isReachable(target); + + if (hostinfo.getAddr() == null) + throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Address for " + target + " not yet known")); + + if (!reachable) + throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Not known to be reachable: " + target)); + + return withObjSyncClient(target.toString(), hostinfo.getAddr(), hostinfo.getSecurePort(), syncTimeout, fn); + } + + public R withObjSyncClient(String host, String addr, int port, long timeout, ObjectSyncClientFunction fn) { + var key = new ObjSyncStubKey(host, addr, port); + var stub = _objSyncCache.computeIfAbsent(key, (k) -> { + var channel = rpcChannelFactory.getSecureChannel(host, addr, port); + return DhfsObjectSyncGrpcGrpc.newBlockingStub(channel) + .withMaxOutboundMessageSize(Integer.MAX_VALUE) + .withMaxInboundMessageSize(Integer.MAX_VALUE); + + }); + return fn.apply(stub.withDeadlineAfter(timeout, TimeUnit.SECONDS)); + } + + public void dropCache() { + rpcChannelFactory.dropCache(); + _objSyncCache = new ConcurrentHashMap<>(); + } + + @FunctionalInterface + public interface ObjectSyncClientFunction { + R apply(DhfsObjectSyncGrpcGrpc.DhfsObjectSyncGrpcBlockingStub client); + } + + private record ObjSyncStubKey(String host, String address, int port) { + } + +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java new file mode 100644 index 00000000..136041a8 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java @@ -0,0 +1,207 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.jrepository.JObject; +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.jrepository.JObjectManager; +import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; +import com.usatiuk.dhfs.objects.persistence.JObjectDataP; +import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; +import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; +import io.grpc.Status; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.inject.Instance; +import jakarta.inject.Inject; + +import java.util.HashMap; +import 
java.util.Objects; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +@ApplicationScoped +public class SyncHandler { + @Inject + JObjectManager jObjectManager; + @Inject + PeerManager remoteHostManager; + @Inject + RemoteObjectServiceClient remoteObjectServiceClient; + @Inject + InvalidationQueueService invalidationQueueService; + @Inject + Instance conflictResolvers; + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + ProtoSerializer dataProtoSerializer; + @Inject + OpObjectRegistry opObjectRegistry; + @Inject + JObjectTxManager jObjectTxManager; + + public void pushInitialResyncObj(UUID host) { + Log.info("Doing initial object push for " + host); + + var objs = jObjectManager.findAll(); + + for (var obj : objs) { + Log.trace("IS: " + obj + " to " + host); + invalidationQueueService.pushInvalidationToOne(host, obj); + } + } + + public void pushInitialResyncOp(UUID host) { + Log.info("Doing initial op push for " + host); + + jObjectTxManager.executeTxAndFlush( + () -> { + opObjectRegistry.pushBootstrapData(host); + } + ); + } + + public void handleOneUpdate(UUID from, ObjectHeader header) { + AtomicReference> foundExt = new AtomicReference<>(); + + boolean conflict = jObjectTxManager.executeTx(() -> { + JObject found = jObjectManager.getOrPut(header.getName(), JObjectData.class, Optional.empty()); + foundExt.set(found); + + var receivedTotalVer = header.getChangelog().getEntriesList() + .stream().map(ObjectChangelogEntry::getVersion).reduce(0L, Long::sum); + + var receivedMap = new HashMap(); + for (var e : header.getChangelog().getEntriesList()) { + receivedMap.put(UUID.fromString(e.getHost()), e.getVersion()); + } + + return found.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, data, bump, invalidate) -> { + if (md.getRemoteCopies().getOrDefault(from, 0L) > receivedTotalVer) { + Log.error("Received older index update than was known for host: " + + from + " " + header.getName()); + throw new OutdatedUpdateException(); + } + + String rcv = ""; + for (var e : header.getChangelog().getEntriesList()) { + rcv += e.getHost() + ": " + e.getVersion() + "; "; + } + String ours = ""; + for (var e : md.getChangelog().entrySet()) { + ours += e.getKey() + ": " + e.getValue() + "; "; + } + Log.trace("Handling update: " + header.getName() + " from " + from + "\n" + "ours: " + ours + " \n" + "received: " + rcv); + + boolean updatedRemoteVersion = false; + + var oldRemoteVer = md.getRemoteCopies().put(from, receivedTotalVer); + if (oldRemoteVer == null || !oldRemoteVer.equals(receivedTotalVer)) updatedRemoteVersion = true; + + boolean hasLower = false; + boolean hasHigher = false; + for (var e : Stream.concat(md.getChangelog().keySet().stream(), receivedMap.keySet().stream()).collect(Collectors.toSet())) { + if (receivedMap.getOrDefault(e, 0L) < md.getChangelog().getOrDefault(e, 0L)) + hasLower = true; + if (receivedMap.getOrDefault(e, 0L) > md.getChangelog().getOrDefault(e, 0L)) + hasHigher = true; + } + + if (hasLower && hasHigher) { + Log.info("Conflict on update (inconsistent version): " + header.getName() + " from " + from); + return true; + } + + if (hasLower) { + Log.info("Received older index update than known: " + + from + " " + header.getName()); + throw new OutdatedUpdateException(); + } + + if (hasHigher) { + invalidate.apply(); + md.getChangelog().clear(); + md.getChangelog().putAll(receivedMap); + 
md.getChangelog().putIfAbsent(persistentPeerDataService.getSelfUuid(), 0L); + if (header.hasPushedData()) + found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); + return false; + } else if (data == null && header.hasPushedData()) { + found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + if (found.getData() == null) + found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); + } + + assert Objects.equals(receivedTotalVer, md.getOurVersion()); + + if (!updatedRemoteVersion) + Log.debug("No action on update: " + header.getName() + " from " + from); + + return false; + }); + }); + + // TODO: Is the lock gap here ok? + if (conflict) { + Log.info("Trying conflict resolution: " + header.getName() + " from " + from); + var found = foundExt.get(); + + JObjectData theirsData; + ObjectHeader theirsHeader; + if (header.hasPushedData()) { + theirsHeader = header; + theirsData = dataProtoSerializer.deserialize(header.getPushedData()); + } else { + var got = remoteObjectServiceClient.getSpecificObject(from, header.getName()); + theirsData = dataProtoSerializer.deserialize(got.getRight()); + theirsHeader = got.getLeft(); + } + + jObjectTxManager.executeTx(() -> { + var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + if (d == null) + throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName())); + return d.getConflictResolver(); + }); + var resolver = conflictResolvers.select(resolverClass); + resolver.get().resolve(from, theirsHeader, theirsData, found); + }); + Log.info("Resolved conflict for " + from + " " + header.getName()); + } + + } + + public IndexUpdateReply handleRemoteUpdate(IndexUpdatePush request) { + // TODO: Dedup + try { + handleOneUpdate(UUID.fromString(request.getSelfUuid()), request.getHeader()); + } catch (OutdatedUpdateException ignored) { + Log.warn("Outdated update of " + request.getHeader().getName() + " from " + request.getSelfUuid()); + invalidationQueueService.pushInvalidationToOne(UUID.fromString(request.getSelfUuid()), request.getHeader().getName()); + } catch (Exception ex) { + Log.info("Error when handling update from " + request.getSelfUuid() + " of " + request.getHeader().getName(), ex); + throw ex; + } + + return IndexUpdateReply.getDefaultInstance(); + } + + protected static class OutdatedUpdateException extends RuntimeException { + OutdatedUpdateException() { + super(); + } + + OutdatedUpdateException(String message) { + super(message); + } + + @Override + public synchronized Throwable fillInStackTrace() { + return this; + } + } +} \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java rename to 
dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java similarity index 98% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java index fcc5d702..0220c443 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.repository.autosync; import com.usatiuk.dhfs.objects.jrepository.*; import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectory; import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; -import com.usatiuk.utils.HashSetDelayedBlockingQueue; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.Startup; diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java new file mode 100644 index 00000000..63f3e7a1 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java @@ -0,0 +1,17 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import lombok.Getter; +import org.apache.commons.collections4.MultiValuedMap; +import org.apache.commons.collections4.multimap.HashSetValuedHashMap; + +import java.io.Serial; +import java.io.Serializable; +import java.util.UUID; + +public class DeferredInvalidationQueueData implements Serializable { + @Serial + private static final long serialVersionUID = 1L; + + @Getter + private final MultiValuedMap _deferredInvalidations = new HashSetValuedHashMap<>(); +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java new file mode 100644 index 00000000..e62e4d19 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java @@ -0,0 +1,92 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.utils.SerializationHelper; +import com.usatiuk.dhfs.objects.repository.PeerManager; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import io.quarkus.scheduler.Scheduled; +import 
io.smallrye.common.annotation.Blocking; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.SerializationUtils; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.UUID; + +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + +@ApplicationScoped +public class DeferredInvalidationQueueService { + private static final String dataFileName = "invqueue"; + @Inject + PeerManager remoteHostManager; + @Inject + InvalidationQueueService invalidationQueueService; + @ConfigProperty(name = "dhfs.objects.root") + String dataRoot; + // FIXME: DB when? + private DeferredInvalidationQueueData _persistentData = new DeferredInvalidationQueueData(); + + void init(@Observes @Priority(290) StartupEvent event) throws IOException { + Paths.get(dataRoot).toFile().mkdirs(); + Log.info("Initializing with root " + dataRoot); + if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) { + Log.info("Reading invalidation queue"); + _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName))); + } else if (Paths.get(dataRoot).resolve(dataFileName + ".bak").toFile().exists()) { + Log.warn("Reading invalidation queue from backup"); + _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName + ".bak"))); + } + remoteHostManager.registerConnectEventListener(this::returnForHost); + } + + void shutdown(@Observes @Priority(300) ShutdownEvent event) throws IOException { + Log.info("Saving deferred invalidations"); + writeData(); + Log.info("Saved deferred invalidations"); + } + + + private void writeData() { + try { + if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) + Files.move(Paths.get(dataRoot).resolve(dataFileName), Paths.get(dataRoot).resolve(dataFileName + ".bak"), REPLACE_EXISTING); + Files.write(Paths.get(dataRoot).resolve(dataFileName), SerializationUtils.serialize(_persistentData)); + } catch (IOException iex) { + Log.error("Error writing deferred invalidations data", iex); + throw new RuntimeException(iex); + } + } + + // FIXME: + @Scheduled(every = "15s", concurrentExecution = Scheduled.ConcurrentExecution.SKIP) + @Blocking + void periodicReturn() { + for (var reachable : remoteHostManager.getAvailableHosts()) + returnForHost(reachable); + } + + void returnForHost(UUID host) { + synchronized (this) { + var col = _persistentData.getDeferredInvalidations().get(host); + for (var s : col) { + Log.trace("Un-deferred invalidation to " + host + " of " + s); + invalidationQueueService.pushDeferredInvalidations(host, s); + } + col.clear(); + } + } + + void defer(UUID host, String object) { + synchronized (this) { + Log.trace("Deferred invalidation to " + host + " of " + object); + _persistentData.getDeferredInvalidations().put(host, object); + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java diff
--git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java new file mode 100644 index 00000000..b5424c28 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java @@ -0,0 +1,180 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException; +import com.usatiuk.dhfs.objects.jrepository.JObject; +import com.usatiuk.dhfs.objects.jrepository.JObjectManager; +import com.usatiuk.dhfs.objects.repository.PeerManager; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import io.vertx.core.impl.ConcurrentHashSet; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +@ApplicationScoped +public class InvalidationQueueService { + private final HashSetDelayedBlockingQueue> _queue; + private final AtomicReference> _toAllQueue = new AtomicReference<>(new ConcurrentHashSet<>()); + @Inject + PeerManager remoteHostManager; + @Inject + RemoteObjectServiceClient remoteObjectServiceClient; + @Inject + JObjectManager jObjectManager; + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + DeferredInvalidationQueueService deferredInvalidationQueueService; + @ConfigProperty(name = "dhfs.objects.invalidation.threads") + int threads; + private ExecutorService _executor; + private volatile boolean _shutdown = false; + + public InvalidationQueueService(@ConfigProperty(name = "dhfs.objects.invalidation.delay") int delay) { + _queue = new HashSetDelayedBlockingQueue<>(delay); + } + + void init(@Observes @Priority(300) StartupEvent event) throws InterruptedException { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("invalidation-%d") + .build(); + + _executor = Executors.newFixedThreadPool(threads, factory); + + for (int i = 0; i < threads; i++) { + _executor.submit(this::sender); + } + } + + void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException { + _shutdown = true; + _executor.shutdownNow(); + if (!_executor.awaitTermination(30, TimeUnit.SECONDS)) { + Log.error("Failed to shut down invalidation sender thread"); + } + var data = _queue.close(); + Log.info("Will defer " + data.size() + " invalidations on shutdown"); + for (var e : data) + deferredInvalidationQueueService.defer(e.getLeft(), e.getRight()); + } + + private void sender() { + while (!_shutdown) { + try { + try { + if (!_queue.hasImmediate()) { + ConcurrentHashSet toAllQueue; + + while (true) { + toAllQueue = _toAllQueue.get(); + if (toAllQueue != null) { + if (_toAllQueue.compareAndSet(toAllQueue, 
null)) + break; + } else { + break; + } + } + + if (toAllQueue != null) { + var hostInfo = remoteHostManager.getHostStateSnapshot(); + for (var o : toAllQueue) { + for (var h : hostInfo.available()) + _queue.add(Pair.of(h, o)); + for (var u : hostInfo.unavailable()) + deferredInvalidationQueueService.defer(u, o); + } + } + } + + var data = _queue.getAllWait(100, _queue.getDelay()); // TODO: config? + if (data.isEmpty()) continue; + String stats = "Sent invalidation: "; + long success = 0; + + for (var e : data) { + if (!persistentPeerDataService.existsHost(e.getLeft())) continue; + + if (!remoteHostManager.isReachable(e.getLeft())) { + deferredInvalidationQueueService.defer(e.getLeft(), e.getRight()); + continue; + } + + try { + jObjectManager.get(e.getRight()).ifPresent(obj -> { + remoteObjectServiceClient.notifyUpdate(obj, e.getLeft()); + }); + success++; + } catch (DeletedObjectAccessException ignored) { + } catch (Exception ex) { + Log.info("Failed to send invalidation to " + e.getLeft() + ", will retry", ex); + pushInvalidationToOne(e.getLeft(), e.getRight()); + } + if (_shutdown) { + Log.info("Invalidation sender exiting"); + break; + } + } + + stats += success + "/" + data.size() + " "; + Log.info(stats); + } catch (InterruptedException ie) { + throw ie; + } catch (Exception e) { + Log.error("Exception in invalidation sender thread: ", e); + } + } catch (InterruptedException ignored) { + } + } + Log.info("Invalidation sender exiting"); + } + + public void pushInvalidationToAll(JObject obj) { + if (obj.getMeta().isOnlyLocal()) return; + while (true) { + var queue = _toAllQueue.get(); + if (queue == null) { + var nq = new ConcurrentHashSet(); + if (!_toAllQueue.compareAndSet(null, nq)) continue; + queue = nq; + } + + queue.add(obj.getMeta().getName()); + + if (_toAllQueue.get() == queue) break; + } + } + + public void pushInvalidationToOne(UUID host, JObject obj) { + if (obj.getMeta().isOnlyLocal()) return; + if (remoteHostManager.isReachable(host)) + _queue.add(Pair.of(host, obj.getMeta().getName())); + else + deferredInvalidationQueueService.defer(host, obj.getMeta().getName()); + } + + public void pushInvalidationToAll(String name) { + pushInvalidationToAll(jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found"))); + } + + public void pushInvalidationToOne(UUID host, String name) { + pushInvalidationToOne(host, jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found"))); + } + + protected void pushDeferredInvalidations(UUID host, String name) { + _queue.add(Pair.of(host, name)); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java similarity index 98% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java index 9cd68547..3bf3b647 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.repository.opsupport; import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; import com.usatiuk.dhfs.objects.repository.PeerManager; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; -import com.usatiuk.utils.HashSetDelayedBlockingQueue; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.Startup; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java rename to 
dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java new file mode 100644 index 00000000..e51e4d02 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.peersync; + +public record PeerInfo(String selfUuid, String cert) { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java new file mode 100644 index 00000000..2d3914f0 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java @@ -0,0 +1,51 @@ +package com.usatiuk.dhfs.objects.repository.peertrust; + +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import io.quarkus.logging.Log; +import io.quarkus.security.credential.CertificateCredential; +import io.quarkus.security.identity.AuthenticationRequestContext; +import io.quarkus.security.identity.SecurityIdentity; +import io.quarkus.security.identity.SecurityIdentityAugmentor; +import io.quarkus.security.runtime.QuarkusSecurityIdentity; +import io.smallrye.mutiny.Uni; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import java.util.UUID; +import java.util.function.Supplier; + +@ApplicationScoped +public class PeerRolesAugmentor implements SecurityIdentityAugmentor { + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Override + public Uni augment(SecurityIdentity identity, AuthenticationRequestContext context) { + return Uni.createFrom().item(build(identity)); + } + + private Supplier build(SecurityIdentity identity) { + if (identity.isAnonymous()) { + return () -> identity; + } else { + QuarkusSecurityIdentity.Builder builder = QuarkusSecurityIdentity.builder(identity); + + var uuid = identity.getPrincipal().getName().substring(3); + + try { + var entry = persistentPeerDataService.getHost(UUID.fromString(uuid)); + + if (!entry.getCertificate().equals(identity.getCredential(CertificateCredential.class).getCertificate())) { + Log.error("Certificate mismatch for " + uuid); + return () -> identity; + } + + builder.addRole("cluster-member"); + return builder::build; + } catch (Exception e) { + Log.error("Error when checking certificate for " + uuid, e); + return () -> identity; + } + } + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java new file mode 100644 index 00000000..ae0d8359 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java @@ -0,0 +1,71 @@ +package 
com.usatiuk.dhfs.objects.repository.peertrust; + +import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import org.apache.commons.lang3.tuple.Pair; + +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509TrustManager; +import java.io.IOException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Collection; +import java.util.concurrent.atomic.AtomicReference; + +@ApplicationScoped +public class PeerTrustManager implements X509TrustManager { + private final AtomicReference trustManager = new AtomicReference<>(); + + @Override + public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException { + trustManager.get().checkClientTrusted(chain, authType); + } + + @Override + public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { + trustManager.get().checkServerTrusted(chain, authType); + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return trustManager.get().getAcceptedIssuers(); + } + + public synchronized void reloadTrustManagerHosts(Collection hosts) { + try { + Log.info("Trying to reload trust manager: " + hosts.size() + " known hosts"); + reloadTrustManager(hosts.stream().map(hostInfo -> + Pair.of(hostInfo.getUuid().toString(), hostInfo.getCertificate())).toList()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private synchronized void reloadTrustManager(Collection> certs) throws KeyStoreException, NoSuchAlgorithmException, CertificateException, IOException { + KeyStore ts = KeyStore.getInstance(KeyStore.getDefaultType()); + ts.load(null, null); + + for (var cert : certs) { + ts.setCertificateEntry(cert.getLeft(), cert.getRight()); + } + + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ts); + + TrustManager[] tms = tmf.getTrustManagers(); + for (var tm : tms) { + if (tm instanceof X509TrustManager) { + trustManager.set((X509TrustManager) tm); + return; + } + } + + throw new NoSuchAlgorithmException("No X509TrustManager in TrustManagerFactory"); + } + +} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java new file mode 100644 index 00000000..167465f6 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java @@ -0,0 +1,44 @@ +package com.usatiuk.dhfs.objects.repository.peertrust; + + +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import io.quarkus.vertx.http.HttpServerOptionsCustomizer; +import io.vertx.core.http.HttpServerOptions; +import io.vertx.core.net.KeyCertOptions; +import io.vertx.core.net.TrustOptions; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import javax.net.ssl.KeyManagerFactory; +import java.security.KeyStore; +import java.security.cert.Certificate; + +@ApplicationScoped +public class PeerTrustServerCustomizer implements HttpServerOptionsCustomizer { + + @Inject + PeerTrustManager 
peerTrustManager; + + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Override + public void customizeHttpsServer(HttpServerOptions options) { + try { + KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); + ks.load(null, null); + + ks.setKeyEntry("sslkey", + persistentPeerDataService.getSelfKeypair().getPrivate(), null, + new Certificate[]{persistentPeerDataService.getSelfCertificate()}); + + KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + keyManagerFactory.init(ks, null); + + options.setKeyCertOptions(KeyCertOptions.wrap(keyManagerFactory)); + options.setTrustOptions(TrustOptions.wrap(peerTrustManager)); + } catch (Exception e) { + throw new RuntimeException("Error configuring https: ", e); + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java similarity index 99% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java index 3dd12370..493a8323 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java @@ -3,13 +3,13 @@ package com.usatiuk.dhfs.objects.repository.persistence; import com.google.protobuf.ByteString; import com.google.protobuf.CodedOutputStream; import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.SerializationHelper; +import com.usatiuk.dhfs.utils.SerializationHelper; import com.usatiuk.dhfs.objects.persistence.JObjectDataP; import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; import com.usatiuk.dhfs.supportlib.DhfsSupport; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; -import com.usatiuk.utils.ByteUtils; -import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace; +import com.usatiuk.dhfs.utils.ByteUtils; +import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.logging.Log; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java 
b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java new file mode 100644 index 00000000..3232b9f0 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +public record AvailablePeerInfo(String uuid, String addr, int port) { +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java new file mode 100644 index 00000000..2d646474 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +public record KnownPeerDelete(String uuid) { +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java new file mode 100644 index 00000000..5fbd9eb7 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +public record KnownPeerInfo(String uuid) { +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java new file mode 100644 index 00000000..f1e109f8 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +public record KnownPeerPut(String uuid) { +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java new file mode 100644 index 00000000..4d8a3102 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java @@ -0,0 +1,46 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +import com.usatiuk.dhfs.objects.repository.PeerManager; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import jakarta.inject.Inject; +import jakarta.ws.rs.DELETE; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; + +import java.util.Collection; +import java.util.List; +import java.util.UUID; + +@Path("/objects-manage") +public class ManagementApi { + @Inject + PeerManager remoteHostManager; + + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Path("known-peers") + @GET + public List<KnownPeerInfo> knownPeers() { + return persistentPeerDataService.getHostsNoNulls().stream().map(h -> new KnownPeerInfo(h.getUuid().toString())).toList(); + } + + @Path("known-peers") + @PUT + public void addPeer(KnownPeerPut knownPeerPut) { + remoteHostManager.addRemoteHost(UUID.fromString(knownPeerPut.uuid())); + } + + @Path("known-peers") + @DELETE + public void deletePeer(KnownPeerDelete knownPeerDelete) { + remoteHostManager.removeRemoteHost(UUID.fromString(knownPeerDelete.uuid())); + } + + @Path("available-peers") + @GET + public Collection<AvailablePeerInfo> availablePeers() { + return remoteHostManager.getSeenButNotAddedHosts(); + } +} diff --git
a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java new file mode 100644 index 00000000..2f285c42 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java @@ -0,0 +1,54 @@ +package com.usatiuk.dhfs.webui; + +import io.quarkus.runtime.StartupEvent; +import io.vertx.core.http.HttpServerRequest; +import io.vertx.ext.web.Router; +import io.vertx.ext.web.RoutingContext; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Optional; + +@ApplicationScoped +public class WebUiRouter { + + @ConfigProperty(name = "dhfs.webui.root") + Optional root; + + void installRoute(@Observes StartupEvent startupEvent, Router router) { + root.ifPresent(r -> { + router.route().path("/").handler(ctx -> ctx.redirect("/webui")); + router.route() + .path("/webui/*") + .handler(this::handle); + }); + } + + public void handle(RoutingContext event) { + var indexHtml = Paths.get(root.orElseThrow(() -> new IllegalStateException("Web ui root not set but handler called")), "index.html").toString(); + + HttpServerRequest request = event.request(); + String requestedPath = Path.of(event.currentRoute().getPath()).relativize(Path.of(event.normalizedPath())).toString(); + + if ("/".equals(requestedPath)) { + request.response().sendFile(indexHtml); + return; + } + + Path requested = Paths.get(root.get(), requestedPath); + if (!requested.normalize().startsWith(Paths.get(root.get()))) { + request.response().setStatusCode(404).end(); + return; + } + + event.vertx().fileSystem().lprops(requested.toString(), exists -> { + if (exists.succeeded() && exists.result().isRegularFile()) + request.response().sendFile(requested.toString()); + else + request.response().sendFile(indexHtml); + }); + } +} diff --git a/dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto b/dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto new file mode 100644 index 00000000..a1bc1866 --- /dev/null +++ b/dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "com.usatiuk.dhfs.objects.repository.peerdiscovery"; +option java_outer_classname = "DhfsObjectPeerDiscoveryApi"; + +package dhfs.objects.peerdiscovery; + +message PeerDiscoveryInfo { + string uuid = 1; + uint32 port = 2; + uint32 securePort = 3; +} diff --git a/dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto b/dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto new file mode 100644 index 00000000..0f93fdd5 --- /dev/null +++ b/dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto @@ -0,0 +1,155 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "com.usatiuk.dhfs.objects.persistence"; +option java_outer_classname = "DhfsObjectPersistence"; + +package dhfs.objects.persistence; + +message ObjectMetadataP { + string name = 1; + map remoteCopies = 2; + string knownClass = 3; + bool seen = 4; + bool deleted = 5; + repeated string confirmedDeletes = 6; + repeated string referrers = 7; + map changelog = 8; + repeated string savedRefs = 9; + bool frozen = 10; + bool haveLocalCopy = 11; +} + +message FsNodeP { + string uuid = 1; + int64 mode = 2; + int64 ctime = 3; + int64 
mtime = 4; +} + +message FilePChunksEntry { + int64 start = 1; + string id = 2; +} + +message FileP { + FsNodeP fsNode = 1; + repeated FilePChunksEntry chunks = 2; + bool symlink = 3; + int64 size = 4; +} + +message DirectoryP { + FsNodeP fsNode = 1; + map<string, string> children = 2; +} + +message ChunkDataP { + string name = 1; + bytes data = 2; +} + +message PeerDirectoryP { + repeated string peers = 1; +} + +message PersistentPeerInfoP { + string uuid = 1; + bytes cert = 2; +} + +message JKleppmannTreeNodeMetaFileP { + string name = 1; + string fileIno = 2; +} + +message JKleppmannTreeNodeMetaDirectoryP { + string name = 1; +} + +message JKleppmannTreeNodeMetaP { + oneof meta { + JKleppmannTreeNodeMetaFileP jKleppmannTreeNodeMetaFile = 1; + JKleppmannTreeNodeMetaDirectoryP jKleppmannTreeNodeMetaDirectory = 2; + } +} + +message JKleppmannTreeOpP { + int64 timestamp = 1; + string peer = 2; + string newParentId = 3; + JKleppmannTreeNodeMetaP meta = 4; + string child = 5; +} + +message JKleppmannTreeNodePChildrenEntry { + string key = 1; + string value = 2; +} + +message JKleppmannTreeNodeP { + optional string parent = 1; + string id = 2; + repeated JKleppmannTreeNodePChildrenEntry children = 3; + optional JKleppmannTreeNodeMetaP meta = 4; + optional JKleppmannTreeOpP lastEffectiveOp = 5; +} + +message JKleppmannTreePersistentDataPQueueEntry { + int64 clock = 1; + string uuid = 2; + JKleppmannTreeOpP op = 3; +} + +message JKleppmannTreePersistentDataPQueue { + string node = 1; + repeated JKleppmannTreePersistentDataPQueueEntry entries = 2; +} + +message JKleppmannTreePersistentDataPTimestampEntry { + string host = 1; + int64 timestamp = 2; +} + +message JKleppmannTreeOpLogEffectP { + optional JKleppmannTreeOpP oldEffectiveMove = 1; + optional string oldParent = 2; + optional JKleppmannTreeNodeMetaP oldMeta = 3; + JKleppmannTreeOpP effectiveOp = 4; + string newParentId = 5; + JKleppmannTreeNodeMetaP newMeta = 6; + string selfId = 7; +} + +message JKleppmannTreeOpLogPEntry { + int64 clock = 1; + string uuid = 2; + JKleppmannTreeOpP op = 3; + repeated JKleppmannTreeOpLogEffectP effects = 4; +} + +message JKleppmannTreePersistentDataP { + string treeName = 1; + int64 clock = 2; + repeated JKleppmannTreePersistentDataPQueue queues = 3; + repeated JKleppmannTreePersistentDataPTimestampEntry peerLog = 4; + repeated JKleppmannTreeOpLogPEntry opLog = 5; +} + +message PeerDirectoryLocalP { + repeated string initialOpSyncDonePeers = 1; + repeated string initialObjSyncDonePeers = 2; +} + +message JObjectDataP { + oneof obj { + FileP file = 2; + DirectoryP directory = 3; + ChunkDataP chunkData = 5; + PeerDirectoryP peerDirectory = 6; + PersistentPeerInfoP persistentPeerInfo = 7; + JKleppmannTreeNodeP jKleppmannTreeNode = 8; + JKleppmannTreePersistentDataP jKleppmannTreePersistentData = 9; + PeerDirectoryLocalP peerDirectoryLocal = 10; + } +} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto b/dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto new file mode 100644 index 00000000..8ef94946 --- /dev/null +++ b/dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto @@ -0,0 +1,102 @@ +syntax = "proto3"; + +import "dhfs_objects_serial.proto"; + +option java_multiple_files = true; +option java_package = "com.usatiuk.dhfs.objects.repository"; +option java_outer_classname = "DhfsObjectSyncApi"; + +package dhfs.objects.sync; + +service DhfsObjectSyncGrpc { + rpc GetObject (GetObjectRequest) returns (GetObjectReply) {}
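+ // CanDelete appears to be the cross-peer reference check behind deferred
+ // deletion: the callee reports whether the object is a deletion candidate on
+ // its side and returns its own referrers (see CanDeleteReply), which is what
+ // the "Delaying deletion check" / "Deleting from persistent" waits in the
+ // integration tests below exercise.
+ rpc CanDelete (CanDeleteRequest)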
returns (CanDeleteReply) {} + rpc IndexUpdate (IndexUpdatePush) returns (IndexUpdateReply) {} + rpc OpPush (OpPushMsg) returns (OpPushReply) {} + + rpc Ping (PingRequest) returns (PingReply) {} +} + +message PingRequest { + string selfUuid = 1; +} + +message PingReply { + string selfUuid = 1; +} + +message ObjectChangelogEntry { + string host = 1; + uint64 version = 2; +} + +message ObjectChangelog { + repeated ObjectChangelogEntry entries = 1; +} + +message ObjectHeader { + string name = 2; + ObjectChangelog changelog = 5; + optional dhfs.objects.persistence.JObjectDataP pushedData = 6; +} + +message ApiObject { + ObjectHeader header = 1; + dhfs.objects.persistence.JObjectDataP content = 2; +} + +message GetObjectRequest { + string selfUuid = 10; + + string name = 2; +} + +message GetObjectReply { + string selfUuid = 10; + + ApiObject object = 1; +} + +message CanDeleteRequest { + string selfUuid = 10; + + string name = 2; + repeated string ourReferrers = 3; +} + +message CanDeleteReply { + string selfUuid = 10; + string objName = 1; + bool deletionCandidate = 2; + repeated string referrers = 3; +} + +message IndexUpdatePush { + string selfUuid = 10; + + ObjectHeader header = 1; +} + +message IndexUpdateReply {} + +message JKleppmannTreePeriodicPushOpP { + string fromUuid = 1; + int64 timestamp = 2; +} + +message OpPushPayload { + oneof payload { + dhfs.objects.persistence.JKleppmannTreeOpP jKleppmannTreeOpWrapper = 1; + JKleppmannTreePeriodicPushOpP jKleppmannTreePeriodicPushOp = 2; + } +} + +message OpPushMsg { + string selfUuid = 10; + string queueId = 1; + repeated OpPushPayload msg = 2; +} + +message OpPushReply { + +} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/resources/application.properties b/dhfs-parent/server-old/src/main/resources/application.properties new file mode 100644 index 00000000..8309619c --- /dev/null +++ b/dhfs-parent/server-old/src/main/resources/application.properties @@ -0,0 +1,46 @@ +quarkus.grpc.server.use-separate-server=false +dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs +dhfs.objects.root=${HOME}/dhfs_default/data/stuff +dhfs.objects.peerdiscovery.port=42069 +dhfs.objects.peerdiscovery.interval=5000 +dhfs.objects.sync.timeout=30 +dhfs.objects.sync.ping.timeout=5 +dhfs.objects.invalidation.threads=4 +dhfs.objects.invalidation.delay=1000 +dhfs.objects.reconnect_interval=5s +dhfs.objects.write_log=false +dhfs.objects.periodic-push-op-interval=5m +dhfs.fuse.root=${HOME}/dhfs_default/fuse +dhfs.fuse.debug=false +dhfs.fuse.enabled=true +dhfs.files.allow_recursive_delete=false +dhfs.files.target_chunk_size=2097152 +# Writes strictly smaller than this will try to merge with blocks nearby +dhfs.files.write_merge_threshold=0.8 +# If a merge would result in a block of greater size than this, stop merging +dhfs.files.write_merge_limit=1.2 +# Don't take blocks of this size and above when merging +dhfs.files.write_merge_max_chunk_to_take=1 +dhfs.files.write_last_chunk_limit=1.5 +dhfs.objects.writeback.delay=100 +dhfs.objects.writeback.limit=134217728 +dhfs.objects.lru.limit=134217728 +dhfs.objects.lru.print-stats=false +dhfs.objects.writeback.watermark-high=0.6 +dhfs.objects.writeback.watermark-low=0.4 +dhfs.objects.writeback.threads=4 +dhfs.objects.deletion.delay=1000 +dhfs.objects.deletion.can-delete-retry-delay=10000 +dhfs.objects.ref_verification=true +dhfs.files.use_hash_for_chunks=false +dhfs.objects.autosync.threads=2 +dhfs.objects.autosync.download-all=false +dhfs.objects.move-processor.threads=4 
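+# Assuming the write_merge_* fractions above scale target_chunk_size (2097152 B
+# = 2 MiB): merging is attempted for writes under ~1.6 MiB (0.8), a merge that
+# would produce a block over ~2.4 MiB (1.2) is stopped, and chunks at or above
+# 1.0x the target size are never consumed by a merge.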
+dhfs.objects.ref-processor.threads=4 +dhfs.objects.opsender.batch-size=100 +dhfs.objects.lock_timeout_secs=15 +dhfs.local-discovery=true +quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE +quarkus.log.category."com.usatiuk.dhfs".level=TRACE +quarkus.http.insecure-requests=enabled +quarkus.http.ssl.client-auth=required diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java new file mode 100644 index 00000000..03f74be5 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java @@ -0,0 +1,29 @@ +package com.usatiuk.dhfs; + +import io.quarkus.test.junit.QuarkusTestProfile; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; + +abstract public class TempDataProfile implements QuarkusTestProfile { + protected void getConfigOverrides(Map<String, String> toPut) {} + + @Override + final public Map<String, String> getConfigOverrides() { + Path tempDirWithPrefix; + try { + tempDirWithPrefix = Files.createTempDirectory("dhfs-test"); + } catch (IOException e) { + throw new RuntimeException(e); + } + var ret = new HashMap<String, String>(); + ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString()); + ret.put("dhfs.objects.root", tempDirWithPrefix.resolve("dhfs_root_d_test").toString()); + ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString()); + getConfigOverrides(ret); + return ret; + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java new file mode 100644 index 00000000..2a6979a6 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java @@ -0,0 +1,44 @@ +package com.usatiuk.dhfs; + +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.Objects; + +@ApplicationScoped +public class TestDataCleaner { + @ConfigProperty(name = "dhfs.objects.persistence.files.root") + String tempDirectory; + @ConfigProperty(name = "dhfs.objects.root") + String tempDirectoryIdx; + + void init(@Observes @Priority(1) StartupEvent event) throws IOException { + try { + purgeDirectory(Path.of(tempDirectory).toFile()); + purgeDirectory(Path.of(tempDirectoryIdx).toFile()); + } catch (Exception ignored) { + Log.warn("Couldn't cleanup test data on init"); + } + } + + void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException { + purgeDirectory(Path.of(tempDirectory).toFile()); + purgeDirectory(Path.of(tempDirectoryIdx).toFile()); + } + + void purgeDirectory(File dir) { + for (File file : Objects.requireNonNull(dir.listFiles())) { + if (file.isDirectory()) + purgeDirectory(file); + file.delete(); + } + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java new file mode 100644 index 00000000..86ad0fb3 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java @@ -0,0 +1,83
@@ +package com.usatiuk.dhfs.benchmarks; + +import io.quarkus.logging.Log; +import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; + +import java.util.Arrays; +import java.util.function.Supplier; + +public class Benchmarker { + static long[] runLatency(Supplier<?> fn, int iterations) { + var out = new long[iterations]; + + int hash = 1; + + for (int i = 0; i < iterations; i++) { + long startNanos = System.nanoTime(); + var cur = fn.get(); + long stopNanos = System.nanoTime(); + out[i] = stopNanos - startNanos; + hash = hash * 31 + cur.hashCode(); + } + + System.out.println("\nHash: " + hash); + + return out; + } + + static long[] runThroughput(Supplier<?> fn, int iterations, long iterationTime) { + var out = new long[iterations]; + + int hash = 1; + + for (int i = 0; i < iterations; i++) { + long startMillis = System.currentTimeMillis(); + long count = 0; + // FIXME: That's probably janky + while (System.currentTimeMillis() - startMillis < iterationTime) { + var res = fn.get(); + count++; + hash = hash * 31 + res.hashCode(); + } + System.out.println("Ran iteration " + i + "/" + iterations + " count=" + count); + out[i] = count; + } + + System.out.println("\nHash: " + hash); + + return out; + } + + static void printStats(double[] data, String unit) { + DescriptiveStatistics stats = new DescriptiveStatistics(); + for (var r : data) { + stats.addValue(r); + } + Log.info("\n" + stats + + "\n 50%: " + stats.getPercentile(50) + " " + unit + + "\n 90%: " + stats.getPercentile(90) + " " + unit + + "\n 95%: " + stats.getPercentile(95) + " " + unit + + "\n 99%: " + stats.getPercentile(99) + " " + unit + + "\n 99.9%: " + stats.getPercentile(99.9) + " " + unit + + "\n 99.99%: " + stats.getPercentile(99.99) + " " + unit + ); + + } + + static void runAndPrintMixSimple(String name, Supplier<?> fn, int latencyIterations, int thrptIterations, int thrptIterationTime, int warmupIterations, int warmupIterationTime) { + System.out.println("\n=========\n" + "Running " + name + "\n=========\n"); + System.out.println("==Warmup=="); + runThroughput(fn, warmupIterations, warmupIterationTime); + System.out.println("==Warmup done=="); + System.out.println("==Throughput=="); + var thrpt = runThroughput(fn, thrptIterations, thrptIterationTime); + printStats(Arrays.stream(thrpt).mapToDouble(o -> (double) o / 1000).toArray(), "ops/s"); + System.out.println("==Throughput done=="); + System.out.println("==Latency=="); + var lat = runLatency(fn, latencyIterations); + printStats(Arrays.stream(lat).mapToDouble(o -> (double) o).toArray(), "ns/op"); + System.out.println("==Latency done=="); + System.out.println("\n=========\n" + name + " done" + "\n=========\n"); + } + +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java new file mode 100644 index 00000000..96acf3f5 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java @@ -0,0 +1,52 @@ +package com.usatiuk.dhfs.benchmarks; + +import com.google.protobuf.UnsafeByteOperations; +import com.usatiuk.dhfs.TempDataProfile; +import com.usatiuk.dhfs.files.service.DhfsFileService; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import jakarta.inject.Inject; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +import java.nio.ByteBuffer; +import java.util.Map; + +class Profiles { + public static class
DhfsFuseTestProfile extends TempDataProfile { + @Override + protected void getConfigOverrides(Map<String, String> ret) { + ret.put("quarkus.log.category.\"com.usatiuk.dhfs\".level", "INFO"); + ret.put("dhfs.fuse.enabled", "false"); + ret.put("dhfs.objects.ref_verification", "false"); + } + } +} + +@QuarkusTest +@TestProfile(Profiles.DhfsFuseTestProfile.class) +public class DhfsFileBenchmarkTest { + @Inject + DhfsFileService dhfsFileService; + + @Test + @Disabled + void openRootTest() { + Benchmarker.runAndPrintMixSimple("dhfsFileService.open(\"\")", + () -> { + return dhfsFileService.open(""); + }, 1_000_000, 5, 1000, 5, 1000); + } + + @Test + @Disabled + void writeMbTest() { + String file = dhfsFileService.create("/writeMbTest", 0777).get(); + var bb = ByteBuffer.allocateDirect(1024 * 1024); + Benchmarker.runAndPrintMixSimple("dhfsFileService.write(\"\")", + () -> { + var thing = UnsafeByteOperations.unsafeWrap(bb); + return dhfsFileService.write(file, dhfsFileService.size(file), thing); + }, 1_000, 10, 100, 1, 100); + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java new file mode 100644 index 00000000..93cc42b8 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.files; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.DhfsFileServiceSimpleTestProfile.class) +public class DhfsFileServiceSimpleTest extends DhfsFileServiceSimpleTestImpl { +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java new file mode 100644 index 00000000..8bea5c7e --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java @@ -0,0 +1,288 @@ +package com.usatiuk.dhfs.files; + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.TempDataProfile; +import com.usatiuk.dhfs.files.objects.ChunkData; +import com.usatiuk.dhfs.files.objects.File; +import com.usatiuk.dhfs.files.service.DhfsFileService; +import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException; +import com.usatiuk.dhfs.objects.jrepository.JObjectManager; +import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; +import com.usatiuk.kleppmanntree.AlreadyExistsException; +import jakarta.inject.Inject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.awaitility.Awaitility.await; + +class Profiles { + public static class DhfsFileServiceSimpleTestProfile extends TempDataProfile { + @Override + protected void getConfigOverrides(Map<String, String> ret) { + ret.put("dhfs.fuse.enabled", "false"); + } + } + + public static class DhfsFileServiceSimpleTestProfileNoChunking extends TempDataProfile { + @Override + protected void getConfigOverrides(Map<String, String> ret) { + ret.put("dhfs.fuse.enabled", "false"); + ret.put("dhfs.files.target_chunk_size", "-1"); + } + } + + public static class DhfsFileServiceSimpleTestProfileSmallChunking extends TempDataProfile { + @Override + protected void getConfigOverrides(Map<String, String> ret) {
ret.put("dhfs.fuse.enabled", "false"); + ret.put("dhfs.files.target_chunk_size", "3"); + } + } +} + +public class DhfsFileServiceSimpleTestImpl { + @Inject + DhfsFileService fileService; + @Inject + JObjectManager jObjectManager; + @Inject + JObjectTxManager jObjectTxManager; + + @Test + void readTest() { + var fuuid = UUID.randomUUID(); + { + ChunkData c1 = new ChunkData(ByteString.copyFrom("12345".getBytes())); + ChunkData c2 = new ChunkData(ByteString.copyFrom("678".getBytes())); + ChunkData c3 = new ChunkData(ByteString.copyFrom("91011".getBytes())); + File f = new File(fuuid, 777, false); + f.getChunks().put(0L, c1.getName()); + f.getChunks().put((long) c1.getBytes().size(), c2.getName()); + f.getChunks().put((long) c1.getBytes().size() + c2.getBytes().size(), c3.getName()); + + // FIXME: dhfs_files + + var c1o = new AtomicReference(); + var c2o = new AtomicReference(); + var c3o = new AtomicReference(); + var fo = new AtomicReference(); + + jObjectTxManager.executeTx(() -> { + c1o.set(jObjectManager.put(c1, Optional.of(f.getName())).getMeta().getName()); + c2o.set(jObjectManager.put(c2, Optional.of(f.getName())).getMeta().getName()); + c3o.set(jObjectManager.put(c3, Optional.of(f.getName())).getMeta().getName()); + fo.set(jObjectManager.put(f, Optional.empty()).getMeta().getName()); + }); + + var all = jObjectManager.findAll(); + Assertions.assertTrue(all.contains(c1o.get())); + Assertions.assertTrue(all.contains(c2o.get())); + Assertions.assertTrue(all.contains(c3o.get())); + Assertions.assertTrue(all.contains(fo.get())); + } + + String all = "1234567891011"; + + { + for (int start = 0; start < all.length(); start++) { + for (int end = start; end <= all.length(); end++) { + var read = fileService.read(fuuid.toString(), start, end - start); + Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray()); + } + } + } + } + + @Test + void dontMkdirTwiceTest() { + Assertions.assertDoesNotThrow(() -> fileService.mkdir("/dontMkdirTwiceTest", 777)); + Assertions.assertThrows(AlreadyExistsException.class, () -> fileService.mkdir("/dontMkdirTwiceTest", 777)); + } + + @Test + void writeTest() { + var ret = fileService.create("/writeTest", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid, 4, new byte[]{10, 11, 12}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid, 10, new byte[]{13, 14}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); + fileService.write(uuid, 6, new byte[]{15, 16}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); + fileService.write(uuid, 3, new byte[]{17, 18}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); + } + + @Test + void removeTest() { + var ret = fileService.create("/removeTest", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 
10).get().toByteArray()); + + fileService.unlink("/removeTest"); + Assertions.assertFalse(fileService.open("/removeTest").isPresent()); + } + + @Test + void truncateTest1() { + var ret = fileService.create("/truncateTest1", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + fileService.truncate(uuid, 20); + fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray()); + } + + @Test + void truncateTest2() { + var ret = fileService.create("/truncateTest2", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + fileService.truncate(uuid, 20); + fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray()); + } + + @Test + void truncateTest3() { + var ret = fileService.create("/truncateTest3", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + fileService.truncate(uuid, 7); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).get().toByteArray()); + } + + @Test + void moveTest() { + var ret = fileService.create("/moveTest", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + Assertions.assertTrue(fileService.rename("/moveTest", "/movedTest")); + Assertions.assertFalse(fileService.open("/moveTest").isPresent()); + Assertions.assertTrue(fileService.open("/movedTest").isPresent()); + + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + fileService.read(fileService.open("/movedTest").get(), 0, 10).get().toByteArray()); + } + + @Test + void moveOverTest() throws InterruptedException { + var ret = fileService.create("/moveOverTest1", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + var ret2 = fileService.create("/moveOverTest2", 777); + Assertions.assertTrue(ret2.isPresent()); + var uuid2 = ret2.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}); + Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray()); + + var oldfile = jObjectManager.get(ret2.get()).orElseThrow(IllegalStateException::new); + var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, 
(m, d) -> d.extractRefs()).stream().toList().get(0); + var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); + + Assertions.assertTrue(fileService.rename("/moveOverTest1", "/moveOverTest2")); + Assertions.assertFalse(fileService.open("/moveOverTest1").isPresent()); + Assertions.assertTrue(fileService.open("/moveOverTest2").isPresent()); + + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray()); + + await().atMost(5, TimeUnit.SECONDS).until(() -> { + try { + return chunkObj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, + (m, d) -> !m.getReferrers().contains(uuid)); + } catch (DeletedObjectAccessException ignored) { + return true; + } + }); + } + + @Test + void readOverSizeTest() { + var ret = fileService.create("/readOverSizeTest", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).get().toByteArray()); + } + + @Test + void writeOverSizeTest() { + var ret = fileService.create("/writeOverSizeTest", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid, 20, new byte[]{10, 11, 12, 13, 14, 15, 16, 17, 18, 19}); + Assertions.assertArrayEquals(new byte[]{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 + }, fileService.read(uuid, 0, 30).get().toByteArray()); + } + + @Test + void moveTest2() throws InterruptedException { + var ret = fileService.create("/moveTest2", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new); + var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); + var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); + + chunkObj.runReadLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + Assertions.assertTrue(m.getReferrers().contains(uuid)); + }); + + Assertions.assertTrue(fileService.rename("/moveTest2", "/movedTest2")); + Assertions.assertFalse(fileService.open("/moveTest2").isPresent()); + Assertions.assertTrue(fileService.open("/movedTest2").isPresent()); + + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + fileService.read(fileService.open("/movedTest2").get(), 0, 10).get().toByteArray()); + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java new file mode 100644 index 00000000..5aab68e4 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java @@ -0,0 +1,9 @@ +package 
com.usatiuk.dhfs.files; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.DhfsFileServiceSimpleTestProfileNoChunking.class) +public class DhfsFileServiceSimpleTestNoChunkingTest extends DhfsFileServiceSimpleTestImpl { +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java new file mode 100644 index 00000000..2d9fdd78 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.files; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.DhfsFileServiceSimpleTestProfileSmallChunking.class) +public class DhfsFileServiceSimpleTestSmallChunkingTest extends DhfsFileServiceSimpleTestImpl { +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java new file mode 100644 index 00000000..df800321 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java @@ -0,0 +1,77 @@ +package com.usatiuk.dhfs.fuse; + +import com.usatiuk.dhfs.TempDataProfile; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; + +class Profiles { + public static class DhfsFuseTestProfile extends TempDataProfile { + } +} + +@QuarkusTest +@TestProfile(Profiles.DhfsFuseTestProfile.class) +public class DhfsFuseTest { + @ConfigProperty(name = "dhfs.fuse.root") + String root; + + @Test + void readWriteFileTest() throws IOException, InterruptedException { + byte[] testString = "test file thing".getBytes(); + Path testPath = Path.of(root).resolve("test1"); + + Assertions.assertDoesNotThrow(() -> Files.createFile(testPath)); + Assertions.assertDoesNotThrow(() -> Files.write(testPath, testString)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testPath)); + Assertions.assertArrayEquals(Files.readAllBytes(testPath), testString); + } + + @Test + void symlinkTest() throws IOException, InterruptedException { + byte[] testString = "symlinkedfile".getBytes(); + Path testPath = Path.of(root).resolve("symlinktarget"); + Path testSymlink = Path.of(root).resolve("symlinktest"); + + Assertions.assertDoesNotThrow(() -> Files.createFile(testPath)); + Assertions.assertDoesNotThrow(() -> Files.write(testPath, testString)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testPath)); + Assertions.assertArrayEquals(Files.readAllBytes(testPath), testString); + + Assertions.assertDoesNotThrow(() -> Files.createSymbolicLink(testSymlink, testPath)); + Assertions.assertTrue(() -> Files.isSymbolicLink(testSymlink)); + Assertions.assertEquals(testPath, Files.readSymbolicLink(testSymlink)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testSymlink)); + Assertions.assertArrayEquals(Files.readAllBytes(testSymlink), testString); + } + + @Test + void dontRemoveEmptyDirTest() throws IOException { + byte[] testString = "dontRemoveEmptyDirTestStr".getBytes(); + Path testDir = 
Path.of(root).resolve("dontRemoveEmptyDirTestDir"); + Path testFile = testDir.resolve("dontRemoveEmptyDirTestFile"); + + Assertions.assertDoesNotThrow(() -> Files.createDirectory(testDir)); + Assertions.assertDoesNotThrow(() -> Files.createFile(testFile)); + Assertions.assertDoesNotThrow(() -> Files.write(testFile, testString)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testFile)); + Assertions.assertArrayEquals(Files.readAllBytes(testFile), testString); + + Assertions.assertThrows(Exception.class, () -> Files.delete(testDir)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testFile)); + Assertions.assertArrayEquals(Files.readAllBytes(testFile), testString); + + Assertions.assertDoesNotThrow(() -> Files.delete(testFile)); + Assertions.assertDoesNotThrow(() -> Files.delete(testDir)); + Assertions.assertFalse(Files.exists(testDir)); + Assertions.assertFalse(Files.exists(testFile)); + Assertions.assertThrows(Exception.class, () -> Files.readAllBytes(testFile)); + } + +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java new file mode 100644 index 00000000..b9d9f92d --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java @@ -0,0 +1,352 @@ +package com.usatiuk.dhfs.integration; + +import com.github.dockerjava.api.model.Device; +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.*; +import org.slf4j.LoggerFactory; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.containers.output.WaitingConsumer; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.io.IOException; +import java.time.Duration; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; + +import static org.awaitility.Awaitility.await; + +public class DhfsFuseIT { + GenericContainer<?> container1; + GenericContainer<?> container2; + + WaitingConsumer waitingConsumer1; + WaitingConsumer waitingConsumer2; + + String c1uuid; + String c2uuid; + + @BeforeEach + void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException { + Network network = Network.newNetwork(); + container1 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + container2 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + + Stream.of(container1, container2).parallel().forEach(GenericContainer::start); + + waitingConsumer1 = new WaitingConsumer(); + var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("1-" + testInfo.getDisplayName()); + container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
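+ // The peering handshake is observed purely through container logs: the
+ // WaitingConsumers block on marker lines ("Ignoring new address",
+ // "Connected") and so double as a readiness barrier around the known-peers
+ // PUT requests issued further down in this setup.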
+ waitingConsumer2 = new WaitingConsumer(); + var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName()); + container2.followOutput(loggingConsumer2.andThen(waitingConsumer2)); + + c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + + Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + + var c1curl = container1.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + } + + @AfterEach + void stop() { + Stream.of(container1, container2).parallel().forEach(GenericContainer::stop); + } + + @Test + void readWriteFileTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + @Test + void readWriteRewriteFileTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + @Test + void createDelayedTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var client = DockerClientFactory.instance().client(); + 
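+ // docker pause (SIGSTOP) freezes the peer without closing its sockets, so
+ // this simulates an unreachable host rather than a clean shutdown.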
client.pauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /root/dhfs_default/fuse/testf2").getExitCode()); + + client.unpauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout())); + } + + @Test + void writeRewriteDelayedTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); + + client.unpauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> + "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + // TODO: How this fits with the tree? 
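+ // The disabled test below exercises deletion across a partition: the file is
+ // removed while the other peer is paused, so the delete is held back (the
+ // "Delaying deletion check" line) until the paused peer returns and confirms,
+ // and only then is the object dropped from persistent storage on both sides.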
+ @Test + @Disabled + void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode()); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1); + + client.unpauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); + } + + @Test + void deleteTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + Log.info("Deleting"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + Log.info("Deleted"); + + // FIXME? 
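+ // The trailing argument of waitUntil() is an occurrence count; waiting for
+ // the 3rd "Deleting from persistent" line presumably corresponds to the file
+ // object plus its chunk objects being collected.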
+ waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3); + + await().atMost(45, TimeUnit.SECONDS).until(() -> + 1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + 1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); + } + + @Test + void moveFileTest() throws IOException, InterruptedException, TimeoutException { + Log.info("Creating"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + Log.info("Listing"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); + Log.info("Moving"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testf1 /root/dhfs_default/fuse/testf2").getExitCode()); + Log.info("Listing"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); + Log.info("Reading"); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout())); + } + + @Test + void moveDirTest() throws IOException, InterruptedException, TimeoutException { + Log.info("Creating"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testdir/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir/testf1").getStdout())); + Log.info("Listing"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); + Log.info("Moving"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir2").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testdir /root/dhfs_default/fuse/testdir2/testdirm").getExitCode()); + Log.info("Listing"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); + Log.info("Reading"); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir2/testdirm/testf1").getStdout())); + } + + + // TODO: This probably shouldn't be working right now + @Test + void removeAddHostTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == 
container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var c2curl = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request DELETE " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /root/dhfs_default/fuse/newfile1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /root/dhfs_default/fuse/newfile1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /root/dhfs_default/fuse/newfile2").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /root/dhfs_default/fuse/newfile2").getExitCode()); + + Log.info("Re-adding"); + container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + Log.info("Listing removeAddHostTest"); + var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/"); + var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/"); + Log.info(cat1); + Log.info(cat2); + Log.info(ls1); + Log.info(ls2); + + return cat1.getStdout().contains("jioadsd") && cat1.getStdout().contains("asvdkljm") && cat1.getStdout().contains("dfgvh") && cat1.getStdout().contains("dscfg") + && cat2.getStdout().contains("jioadsd") && cat2.getStdout().contains("asvdkljm") && cat2.getStdout().contains("dfgvh") && cat2.getStdout().contains("dscfg"); + }); + } + + @Test + void dirConflictTest() throws IOException, InterruptedException, 
TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /root/dhfs_default/fuse/testf"), + Pair.of(container2, "echo test2 >> /root/dhfs_default/fuse/testf")).parallel().map(p -> { + try { + return p.getLeft().execInContainer("/bin/sh", "-c", p.getRight()).getExitCode(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }).anyMatch(r -> r != 0); + Assumptions.assumeTrue(!createFail, "Failed creating one or more files"); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var ls = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"); + var cat = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + Log.info(ls); + Log.info(cat); + return cat.getStdout().contains("test1") && cat.getStdout().contains("test2"); + }); + } + + @Test + void dirCycleTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/a").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/b").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /root/dhfs_default/fuse/a/testfa").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /root/dhfs_default/fuse/b/testfb").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /root/dhfs_default/fuse").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var c2ls = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f -exec cat {} \\;"); + return c2ls.getExitCode() == 0 && c2ls.getStdout().contains("xqr489") && c2ls.getStdout().contains("ahinou"); + }); + + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container1.getContainerId()).exec(); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/a /root/dhfs_default/fuse/b").getExitCode()); + client.pauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container1.getContainerId()).exec(); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/b /root/dhfs_default/fuse/a").getExitCode()); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + Log.info("Listing dirCycleTest"); + Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse")); + Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a")); + Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b")); + Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse")); + 
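+ // Both replicas are inspected: after the crossed moves (a into b on one node,
+ // b into a on the other) the trees must converge to the same acyclic layout
+ // on both nodes, with neither file lost.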
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a")); + Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b")); + + var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;"); + Log.info(c1ls2); + var c2ls2 = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;"); + Log.info(c2ls2); + + return c1ls2.getStdout().contains("xqr489") && c1ls2.getStdout().contains("ahinou") + && c2ls2.getStdout().contains("xqr489") && c2ls2.getStdout().contains("ahinou") + && c1ls2.getExitCode() == 0 && c2ls2.getExitCode() == 0; + }); + + } + +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java new file mode 100644 index 00000000..b401b053 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java @@ -0,0 +1,293 @@ +package com.usatiuk.dhfs.integration; + +import com.github.dockerjava.api.model.Device; +import io.quarkus.logging.Log; +import org.junit.jupiter.api.*; +import org.slf4j.LoggerFactory; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.containers.output.WaitingConsumer; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; + +import static org.awaitility.Awaitility.await; + +public class DhfsFusex3IT { + GenericContainer<?> container1; + GenericContainer<?> container2; + GenericContainer<?> container3; + + WaitingConsumer waitingConsumer1; + WaitingConsumer waitingConsumer2; + WaitingConsumer waitingConsumer3; + + String c1uuid; + String c2uuid; + String c3uuid; + + // This calculation is somewhat racy, so keep it hardcoded for now + long emptyFileCount = 9; + + @BeforeEach + void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException { + // TODO: Dedup + Network network = Network.newNetwork(); + + container1 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + container2 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + container3 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + + + Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::start); +
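+ // Note the peering topology set up below: node 2 is paired with both 1 and 3,
+ // while 1 and 3 are never directly added to each other, so their updates have
+ // to propagate through node 2.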
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c3uuid = container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + + Log.info(container1.getContainerId() + "=" + c1uuid); + Log.info(container2.getContainerId() + "=" + c2uuid); + Log.info(container3.getContainerId() + "=" + c3uuid); + + waitingConsumer1 = new WaitingConsumer(); + var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class)) + .withPrefix(c1uuid.substring(0, 4) + "-" + testInfo.getDisplayName()); + container1.followOutput(loggingConsumer1.andThen(waitingConsumer1)); + waitingConsumer2 = new WaitingConsumer(); + var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class)) + .withPrefix(c2uuid.substring(0, 4) + "-" + testInfo.getDisplayName()); + container2.followOutput(loggingConsumer2.andThen(waitingConsumer2)); + waitingConsumer3 = new WaitingConsumer(); + var loggingConsumer3 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class)) + .withPrefix(c3uuid.substring(0, 4) + "-" + testInfo.getDisplayName()); + container3.followOutput(loggingConsumer3.andThen(waitingConsumer3)); + + Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c3uuid)); + + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); + + var c1curl = container1.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl1 = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl3 = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c3uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c3curl = container3.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + } + + private boolean checkEmpty() throws IOException, InterruptedException { + for (var container : List.of(container1, container2, container3)) { + var found = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs -type f"); + var foundWc = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs 
-type f | wc -l"); + Log.info("Remaining objects in " + container.getContainerId() + ": " + found.toString() + " " + foundWc.toString()); + if (!(found.getExitCode() == 0 && foundWc.getExitCode() == 0 && Integer.parseInt(foundWc.getStdout().strip()) == emptyFileCount)) + return false; + } + return true; + } + + @AfterEach + void stop() { + Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::stop); + } + + @Test + void readWriteFileTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + @Test + void largerFileDeleteTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty()); + } + + @Test + void largerFileDeleteTestNoDelays() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty()); + } + + @Test + void gccHelloWorldTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo '#include\nint main(){printf(\"hello world\"); return 0;}' > /root/dhfs_default/fuse/hello.c").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && gcc hello.c").getExitCode()); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var helloOut = container1.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out"); + Log.info(helloOut); + return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world"); + }); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var helloOut = container2.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out"); + Log.info(helloOut); + return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world"); + }); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var helloOut = container3.execInContainer("/bin/sh", "-c", 
"/root/dhfs_default/fuse/a.out"); + Log.info(helloOut); + return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world"); + }); + } + + @Test + void removeHostTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var c3curl = container3.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request DELETE " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + @Test + void dirConflictTest() throws IOException, InterruptedException, TimeoutException { + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container1.getContainerId()).exec(); + client.pauseContainerCmd(container2.getContainerId()).exec(); + // Pauses needed as otherwise docker buffers some incoming packets + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.pauseContainerCmd(container3.getContainerId()).exec(); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.pauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container1.getContainerId()).exec(); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container3.getContainerId()).exec(); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 
60, TimeUnit.SECONDS, 2); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + for (var c : List.of(container1, container2, container3)) { + var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"); + var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + Log.info(ls); + Log.info(cat); + if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3"))) + return false; + } + return true; + }); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && + container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && + container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout()); + }); + } + + @Test + void fileConflictTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout())); + + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container1.getContainerId()).exec(); + client.pauseContainerCmd(container2.getContainerId()).exec(); + // Pauses needed as otherwise docker buffers some incoming packets + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.pauseContainerCmd(container3.getContainerId()).exec(); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.pauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container1.getContainerId()).exec(); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container3.getContainerId()).exec(); + Log.warn("Waiting for connections"); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer3.waitUntil(frame -> 
frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + Log.warn("Connected"); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + for (var c : List.of(container1, container2, container3)) { + var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"); + var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + Log.info(ls); + Log.info(cat); + if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3"))) + return false; + } + return true; + }); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && + container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && + container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout()); + }); + } + +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java new file mode 100644 index 00000000..5bec10e9 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java @@ -0,0 +1,93 @@ +package com.usatiuk.dhfs.integration; + +import io.quarkus.logging.Log; +import org.jetbrains.annotations.NotNull; +import org.testcontainers.images.builder.ImageFromDockerfile; + +import java.nio.file.Paths; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class DhfsImage implements Future { + + private static String _builtImage = null; + private static DhfsImage INSTANCE = new DhfsImage(); + + private DhfsImage() {} + + public static DhfsImage getInstance() { + return INSTANCE; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public String get() throws InterruptedException, ExecutionException { + return buildImpl(); + } + + @Override + public String get(long timeout, @NotNull TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return buildImpl(); + } + + private synchronized String buildImpl() { + if (_builtImage != null) { + return _builtImage; + } + + Log.info("Building image"); + + String buildPath = System.getProperty("buildDirectory"); + String nativeLibsDirectory = System.getProperty("nativeLibsDirectory"); + Log.info("Build path: " + buildPath); + Log.info("Native libs path: " + nativeLibsDirectory); + + var image = new ImageFromDockerfile() + .withDockerfileFromBuilder(builder -> + builder + .from("azul/zulu-openjdk-debian:21-jre-headless-latest") + .run("apt update && apt install -y libfuse2 curl gcc") + .copy("/app", "/app") + .copy("/libs", "/libs") + .cmd("java", "-ea", "-Xmx128M", + "--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED", + "--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED", + "-Ddhfs.objects.peerdiscovery.interval=100", + "-Ddhfs.objects.invalidation.delay=100", + 
"-Ddhfs.objects.deletion.delay=0", + "-Ddhfs.objects.deletion.can-delete-retry-delay=1000", + "-Ddhfs.objects.ref_verification=true", + "-Ddhfs.objects.write_log=true", + "-Ddhfs.objects.sync.timeout=10", + "-Ddhfs.objects.sync.ping.timeout=5", + "-Ddhfs.objects.reconnect_interval=1s", + "-Dcom.usatiuk.dhfs.supportlib.native-path=/libs", + "-Dquarkus.log.category.\"com.usatiuk\".level=TRACE", + "-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE", + "-Ddhfs.objects.periodic-push-op-interval=5s", + "-jar", "/app/quarkus-run.jar") + .build()) + .withFileFromPath("/app", Paths.get(buildPath, "quarkus-app")) + .withFileFromPath("/libs", Paths.get(nativeLibsDirectory)); + + _builtImage = image.get(); + Log.info("Image built: " + _builtImage); + return _builtImage; + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java new file mode 100644 index 00000000..07a929e4 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java @@ -0,0 +1,135 @@ +package com.usatiuk.dhfs.integration; + +import com.github.dockerjava.api.model.Device; +import org.junit.jupiter.api.*; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.containers.output.WaitingConsumer; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.io.IOException; +import java.time.Duration; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; + +import static org.awaitility.Awaitility.await; + +public class ResyncIT { + GenericContainer container1; + GenericContainer container2; + + WaitingConsumer waitingConsumer1; + WaitingConsumer waitingConsumer2; + + String c1uuid; + String c2uuid; + + @BeforeEach + void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException { + Network network = Network.newNetwork(); + + container1 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + container2 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + + Stream.of(container1, container2).parallel().forEach(GenericContainer::start); + + waitingConsumer1 = new WaitingConsumer(); + var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("1-" + testInfo.getDisplayName()); + container1.followOutput(loggingConsumer1.andThen(waitingConsumer1)); + waitingConsumer2 = new WaitingConsumer(); + var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName()); + container2.followOutput(loggingConsumer2.andThen(waitingConsumer2)); + } + + @AfterEach + void stop() { + Stream.of(container1, 
container2).parallel().forEach(GenericContainer::stop); + } + + @Test + void readWriteFileTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + + Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + + var c1curl = container1.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + + @Test + void manyFiles() throws IOException, InterruptedException, TimeoutException { + var ret = container1.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test$i; done"); + Assertions.assertEquals(0, ret.getExitCode()); + var foundWc = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); + Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip())); + + ret = container2.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test-2-$i; done"); + Assertions.assertEquals(0, ret.getExitCode()); + foundWc = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); + Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip())); + + c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + + Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + + var c1curl = container1.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: 
application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var foundWc2 = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); + return 400 == Integer.valueOf(foundWc2.getStdout().strip()); + }); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var foundWc2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); + return 400 == Integer.valueOf(foundWc2.getStdout().strip()); + }); + } + +} diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java similarity index 100% rename from dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java rename to dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java similarity index 100% rename from dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java rename to dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java diff --git a/dhfs-parent/server-old/src/test/resources/application.properties b/dhfs-parent/server-old/src/test/resources/application.properties new file mode 100644 index 00000000..64f51835 --- /dev/null +++ b/dhfs-parent/server-old/src/test/resources/application.properties @@ -0,0 +1,11 @@ +dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test +dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test +dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test +dhfs.objects.ref_verification=true +dhfs.objects.deletion.delay=0 +quarkus.log.category."com.usatiuk.dhfs".level=TRACE +quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE +quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib +quarkus.http.test-port=0 +quarkus.http.test-ssl-port=0 +dhfs.local-discovery=false \ No newline at end of file diff --git a/dhfs-parent/server/docker-compose.yml b/dhfs-parent/server/docker-compose.yml index a6a0aefa..c45708ba 100644 --- a/dhfs-parent/server/docker-compose.yml +++ b/dhfs-parent/server/docker-compose.yml @@ -31,6 +31,7 @@ services: - ./target/quarkus-app:/app command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED + --add-opens=java.base/java.nio=ALL-UNNAMED -Ddhfs.objects.persistence.files.root=/dhfs_root/p -Ddhfs.objects.root=/dhfs_root/d -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0 diff --git a/dhfs-parent/server/pom.xml b/dhfs-parent/server/pom.xml index d9e34d5d..53daeb0e 100644 --- a/dhfs-parent/server/pom.xml +++ b/dhfs-parent/server/pom.xml @@ -51,7 +51,6 @@ net.openhft zero-allocation-hashing - 0.16 io.quarkus @@ -86,11 +85,6 @@ quarkus-junit5 test - - org.projectlombok - lombok - provided - com.github.SerCeMan jnr-fuse @@ -132,6 +126,10 @@ org.apache.commons commons-collections4 + + 
org.pcollections + pcollections + org.apache.commons commons-math3 @@ -147,6 +145,16 @@ supportlib 1.0-SNAPSHOT + + com.usatiuk.dhfs + objects + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + utils + 1.0-SNAPSHOT + @@ -166,7 +174,7 @@ - true + false concurrent diff --git a/dhfs-parent/server/src/lombok.config b/dhfs-parent/server/src/lombok.config deleted file mode 100644 index f1c474ce..00000000 --- a/dhfs-parent/server/src/lombok.config +++ /dev/null @@ -1 +0,0 @@ -lombok.accessors.prefix += _ diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java index dcd379a8..7074af8d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java @@ -14,7 +14,7 @@ import java.nio.file.Paths; @ApplicationScoped public class ShutdownChecker { private static final String dataFileName = "running"; - @ConfigProperty(name = "dhfs.objects.root") + @ConfigProperty(name = "dhfs.objects.persistence.files.root") String dataRoot; boolean _cleanShutdown = true; boolean _initialized = false; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java index 46f8e283..2f41e743 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -1,90 +1,13 @@ package com.usatiuk.dhfs.files.objects; import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.files.conflicts.NoOpConflictResolver; -import com.usatiuk.dhfs.objects.jrepository.AssumedUnique; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.Leaf; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import net.openhft.hashing.LongTupleHashFunction; - -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; - -@AssumedUnique -@Leaf -public class ChunkData extends JObjectData { - final ChunkDataP _data; - - public ChunkData(ByteString bytes) { - super(); - _data = ChunkDataP.newBuilder() - .setData(bytes) - // TODO: There might be (most definitely) a copy there - .setName(Arrays.stream(LongTupleHashFunction.xx128().hashBytes(bytes.asReadOnlyByteBuffer())) - .mapToObj(Long::toHexString).collect(Collectors.joining())) - .build(); - } - - public ChunkData(ByteString bytes, String name) { - super(); - _data = ChunkDataP.newBuilder() - .setData(bytes) - .setName(name) - .build(); - } - - public ChunkData(ChunkDataP chunkDataP) { - super(); - _data = chunkDataP; - } - - ChunkDataP getData() { - return _data; - } - - public ByteString getBytes() { - return _data.getData(); - } - - public int getSize() { - return _data.getData().size(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ChunkData chunkData = (ChunkData) o; - return Objects.equals(getName(), chunkData.getName()); - } - - @Override - public int hashCode() { - return Objects.hashCode(getName()); - } - - @Override - public String getName() { - return _data.getName(); - } - - @Override - public Class getConflictResolver() { - return NoOpConflictResolver.class; - } - - @Override - public 
Collection<String> extractRefs() { - return List.of(); - } +import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.JDataRemote; +import com.usatiuk.dhfs.objects.JObjectKey; +public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote { @Override public int estimateSize() { - return _data.getData().size(); + return data.size(); } -} +} \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index 0c6fa4e8..d6a4084c 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -1,51 +1,45 @@ package com.usatiuk.dhfs.files.objects; -import com.usatiuk.dhfs.files.conflicts.FileConflictResolver; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import lombok.Getter; -import lombok.Setter; +import com.usatiuk.dhfs.objects.JDataRemote; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.jmap.JMapHolder; +import com.usatiuk.dhfs.objects.jmap.JMapLongKey; -import java.util.*; +import java.util.Collection; +import java.util.Set; -public class File extends FsNode { - @Getter - private final NavigableMap<Long, String> _chunks; - @Getter - private final boolean _symlink; - @Getter - @Setter - private long _size = 0; - - public File(UUID uuid, long mode, boolean symlink) { - super(uuid, mode); - _symlink = symlink; - _chunks = new TreeMap<>(); +public record File(JObjectKey key, long mode, long cTime, long mTime, + boolean symlink, long size +) implements JDataRemote, JMapHolder<JMapLongKey> { + public File withSymlink(boolean symlink) { + return new File(key, mode, cTime, mTime, symlink, size); } - public File(UUID uuid, long mode, boolean symlink, NavigableMap<Long, String> chunks) { - super(uuid, mode); - _symlink = symlink; - _chunks = chunks; + public File withSize(long size) { + return new File(key, mode, cTime, mTime, symlink, size); + } + + public File withMode(long mode) { + return new File(key, mode, cTime, mTime, symlink, size); + } + + public File withCTime(long cTime) { + return new File(key, mode, cTime, mTime, symlink, size); + } + + public File withMTime(long mTime) { + return new File(key, mode, cTime, mTime, symlink, size); } @Override - public Class<? extends ConflictResolver> getConflictResolver() { - return FileConflictResolver.class; - } - - @Override - public Class<? extends JObjectData> getRefType() { - return ChunkData.class; - } - - @Override - public Collection<String> extractRefs() { - return Collections.unmodifiableCollection(_chunks.values()); + public Collection<JObjectKey> collectRefsTo() { + return Set.of(); +// return Set.copyOf(chunks().values()); } @Override public int estimateSize() { - return _chunks.size() * 192; + return 64; +// return chunks.size() * 64; } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java index 58678dd2..e5cb03e3 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java @@ -3,49 +3,49 @@ package com.usatiuk.dhfs.files.service; import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.objects.jrepository.JObject; +import 
com.usatiuk.dhfs.objects.JObjectKey; import org.apache.commons.lang3.tuple.Pair; import java.util.Optional; public interface DhfsFileService { - Optional<String> open(String name); + Optional<JObjectKey> open(String name); - Optional<String> create(String name, long mode); + Optional<JObjectKey> create(String name, long mode); - Pair inoToParent(String ino); + Pair inoToParent(JObjectKey ino); void mkdir(String name, long mode); - Optional<GetattrRes> getattr(String name); + Optional<GetattrRes> getattr(JObjectKey name); - Boolean chmod(String name, long mode); + Boolean chmod(JObjectKey name, long mode); void unlink(String name); Boolean rename(String from, String to); - Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs); + Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs); Iterable<String> readDir(String name); - void updateFileSize(JObject<File> file); + void updateFileSize(File file); - Long size(String f); + Long size(JObjectKey f); - Optional<ByteString> read(String fileUuid, long offset, int length); + Optional<ByteString> read(JObjectKey fileUuid, long offset, int length); - Long write(String fileUuid, long offset, ByteString data); + Long write(JObjectKey fileUuid, long offset, ByteString data); - default Long write(String fileUuid, long offset, byte[] data) { + default Long write(JObjectKey fileUuid, long offset, byte[] data) { return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data)); } - Boolean truncate(String fileUuid, long length); + Boolean truncate(JObjectKey fileUuid, long length); - String readlink(String uuid); + String readlink(JObjectKey uuid); - ByteString readlinkBS(String uuid); + ByteString readlinkBS(JObjectKey uuid); - String symlink(String oldpath, String newpath); + JObjectKey symlink(String oldpath, String newpath); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index 08bf639f..0bcfbb90 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -4,18 +4,19 @@ import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; import com.usatiuk.dhfs.files.objects.ChunkData; import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.files.objects.FsNode; +import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; -import com.usatiuk.dhfs.objects.jrepository.JMutator; -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace; +import com.usatiuk.dhfs.objects.jmap.JMapEntry; +import com.usatiuk.dhfs.objects.jmap.JMapHelper; +import com.usatiuk.dhfs.objects.jmap.JMapLongKey; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.transaction.LockingStrategy; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import 
com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.logging.Log; @@ -35,9 +36,11 @@ import java.util.stream.StreamSupport; @ApplicationScoped public class DhfsFileServiceImpl implements DhfsFileService { @Inject - JObjectManager jObjectManager; + Transaction curTx; @Inject - JObjectTxManager jObjectTxManager; + RemoteTransaction remoteTx; + @Inject + TransactionManager jObjectTxManager; @ConfigProperty(name = "dhfs.files.target_chunk_size") int targetChunkSize; @@ -66,73 +69,73 @@ public class DhfsFileServiceImpl implements DhfsFileService { @ConfigProperty(name = "dhfs.objects.write_log") boolean writeLogging; - @Inject - PersistentPeerDataService persistentPeerDataService; @Inject JKleppmannTreeManager jKleppmannTreeManager; - private JKleppmannTreeManager.JKleppmannTree _tree; + @Inject + JMapHelper jMapHelper; + + private JKleppmannTreeManager.JKleppmannTree getTree() { + return jKleppmannTreeManager.getTree(new JObjectKey("fs")); + } private ChunkData createChunk(ByteString bytes) { - if (useHashForChunks) { - return new ChunkData(bytes); - } else { - return new ChunkData(bytes, persistentPeerDataService.getUniqueId()); - } + var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes); + remoteTx.putData(newChunk); + return newChunk; } void init(@Observes @Priority(500) StartupEvent event) { Log.info("Initializing file service"); - _tree = jKleppmannTreeManager.getTree("fs"); + getTree(); } - private JObject getDirEntry(String name) { - var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); + private JKleppmannTreeNode getDirEntry(String name) { + var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND); - var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); - if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) - throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); - return (JObject) ret; + var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); + return ret; } - private Optional> getDirEntryOpt(String name) { - var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); + private Optional getDirEntryOpt(String name) { + var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); if (res == null) return Optional.empty(); - var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); - if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) - throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); - return Optional.of((JObject) ret); + var ret = curTx.get(JKleppmannTreeNode.class, res); + return ret; } @Override - public Optional getattr(String uuid) { + public Optional getattr(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var ref = jObjectManager.get(uuid); 
- if (ref.isEmpty()) return Optional.empty(); - return ref.get().runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - GetattrRes ret; - if (d instanceof File f) { - ret = new GetattrRes(f.getMtime(), f.getCtime(), f.getMode(), f.isSymlink() ? GetattrType.SYMLINK : GetattrType.FILE); - } else if (d instanceof JKleppmannTreeNode) { - ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY); + var ref = curTx.get(JData.class, uuid).orElse(null); + if (ref == null) return Optional.empty(); + GetattrRes ret; + if (ref instanceof RemoteObjectMeta r) { + var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null); + if (remote instanceof File f) { + ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? GetattrType.SYMLINK : GetattrType.FILE); } else { - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key())); } - return Optional.of(ret); - }); + } else if (ref instanceof JKleppmannTreeNode) { + ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY); + } else { + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key())); + } + return Optional.of(ret); }); } @Override - public Optional open(String name) { + public Optional open(String name) { return jObjectTxManager.executeTx(() -> { try { var ret = getDirEntry(name); - return Optional.of(ret.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) return f.getFileIno(); - else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return m.getName(); - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); - })); + return switch (ret.meta()) { + case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno()); + case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key()); + default -> Optional.empty(); + }; } catch (StatusRuntimeException e) { if (e.getStatus().getCode() == Status.Code.NOT_FOUND) { return Optional.empty(); @@ -142,17 +145,13 @@ public class DhfsFileServiceImpl implements DhfsFileService { }); } - private void ensureDir(JObject entry) { - entry.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) - throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription(m.getName() + " is a file, not directory")); - else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return null; - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); - }); + private void ensureDir(JKleppmannTreeNode entry) { + if (!(entry.meta() instanceof JKleppmannTreeNodeMetaDirectory)) + throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key())); } @Override - public Optional create(String name, long mode) { + public Optional create(String name, long mode) { return jObjectTxManager.executeTx(() -> { Path path = Path.of(name); var parent = getDirEntry(path.getParent().toString()); @@ -163,30 +162,26 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); - File f = new File(fuuid, mode, false); + File f = new 
File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false, 0); + remoteTx.putData(f); - var newNodeId = _tree.getNewNodeId(); - var fobj = jObjectManager.putLocked(f, Optional.of(newNodeId)); try { - _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId); + getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId()); } catch (Exception e) { - fobj.getMeta().removeRef(newNodeId); +// fobj.getMeta().removeRef(newNodeId); throw e; - } finally { - fobj.rwUnlock(); } - return Optional.of(f.getName()); + return Optional.of(f.key()); }); } //FIXME: Slow.. @Override - public Pair inoToParent(String ino) { + public Pair inoToParent(JObjectKey ino) { return jObjectTxManager.executeTx(() -> { - return _tree.findParent(w -> { - if (w.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) - if (f.getFileIno().equals(ino)) - return true; + return getTree().findParent(w -> { + if (w.meta() instanceof JKleppmannTreeNodeMetaFile f) + return f.getFileIno().equals(ino); return false; }); }); @@ -203,7 +198,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { Log.debug("Creating directory " + name); - _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaDirectory(dname), _tree.getNewNodeId()); + getTree().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId()); }); } @@ -211,13 +206,11 @@ public class DhfsFileServiceImpl implements DhfsFileService { public void unlink(String name) { jObjectTxManager.executeTx(() -> { var node = getDirEntryOpt(name).orElse(null); - JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) - if (!d.getNode().getChildren().isEmpty()) throw new DirectoryNotEmptyException(); - return d.getNode().getMeta(); - }); - - _tree.trash(meta, node.getMeta().getName()); + if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) { + if (!allowRecursiveDelete && !node.children().isEmpty()) + throw new DirectoryNotEmptyException(); + } + getTree().trash(node.meta(), node.key()); }); } @@ -225,37 +218,35 @@ public class DhfsFileServiceImpl implements DhfsFileService { public Boolean rename(String from, String to) { return jObjectTxManager.executeTx(() -> { var node = getDirEntry(from); - JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> d.getNode().getMeta()); + JKleppmannTreeNodeMeta meta = node.meta(); var toPath = Path.of(to); var toDentry = getDirEntry(toPath.getParent().toString()); ensureDir(toDentry); - _tree.move(toDentry.getMeta().getName(), meta.withName(toPath.getFileName().toString()), node.getMeta().getName()); - + getTree().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key()); return true; }); } @Override - public Boolean chmod(String uuid, long mode) { + public Boolean chmod(JObjectKey uuid, long mode) { return jObjectTxManager.executeTx(() -> { - var dent = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND)); + var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND)); - dent.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d, bump, i) -> { - if (d instanceof JKleppmannTreeNode) { - return null;//FIXME:? 
- } else if (d instanceof File f) { - bump.apply(); - f.setMtime(System.currentTimeMillis()); - f.setMode(mode); + if (dent instanceof JKleppmannTreeNode) { + return true; + } else if (dent instanceof RemoteObjectMeta) { + var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null); + if (remote instanceof File f) { + remoteTx.putData(f.withMode(mode).withMTime(System.currentTimeMillis())); + return true; } else { throw new IllegalArgumentException(uuid + " is not a file"); } - return null; - }); - - return true; + } else { + throw new IllegalArgumentException(uuid + " is not a file"); + } }); } @@ -264,81 +255,68 @@ public class DhfsFileServiceImpl implements DhfsFileService { return jObjectTxManager.executeTx(() -> { var found = getDirEntry(name); - return found.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (!(d instanceof JKleppmannTreeNode) || !(d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory)) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - } - return new ArrayList<>(d.getNode().getChildren().keySet()); - }); + if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md)) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + + return found.children().keySet(); }); } @Override - public Optional read(String fileUuid, long offset, int length) { + public Optional read(JObjectKey fileUuid, long offset, int length) { return jObjectTxManager.executeTx(() -> { if (length < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); if (offset < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); - var fileOpt = jObjectManager.get(fileUuid); - if (fileOpt.isEmpty()) { + var file = remoteTx.getData(File.class, fileUuid).orElse(null); + if (file == null) { Log.error("File not found when trying to read: " + fileUuid); return Optional.empty(); } - var file = fileOpt.get(); - try { - return file.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> { - if (!(fileData instanceof File)) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - } - var chunksAll = ((File) fileData).getChunks(); - if (chunksAll.isEmpty()) { - return Optional.of(ByteString.empty()); - } - var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet(); + try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) { + if (!it.hasNext()) + return Optional.of(ByteString.empty()); - if (chunksList.isEmpty()) { - return Optional.of(ByteString.empty()); - } +// if (it.peekNextKey().key() != offset) { +// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey()); +// return Optional.of(ByteString.empty()); +// } + long curPos = offset; + ByteString buf = ByteString.empty(); - var chunks = chunksList.iterator(); - ByteString buf = ByteString.empty(); + var chunk = it.next(); - long curPos = offset; - var chunk = chunks.next(); + while (curPos < offset + length) { + var chunkPos = chunk.getKey().key(); - while (curPos < offset + length) { - var chunkPos = chunk.getKey(); + long offInChunk = curPos - chunkPos; - long offInChunk = curPos - chunkPos; + long toReadInChunk = (offset + length) - curPos; - long toReadInChunk = (offset + length) - curPos; + var chunkBytes = readChunk(chunk.getValue().ref()); - var chunkBytes = readChunk(chunk.getValue()); + long readableLen = chunkBytes.size() - 
offInChunk; - long readableLen = chunkBytes.size() - offInChunk; + var toReadReally = Math.min(readableLen, toReadInChunk); - var toReadReally = Math.min(readableLen, toReadInChunk); + if (toReadReally < 0) break; - if (toReadReally < 0) break; + buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally))); - buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally))); + curPos += toReadReally; - curPos += toReadReally; + if (readableLen > toReadInChunk) + break; - if (readableLen > toReadInChunk) - break; + if (!it.hasNext()) break; - if (!chunks.hasNext()) break; + chunk = it.next(); + } - chunk = chunks.next(); - } - - // FIXME: - return Optional.of(buf); - }); + return Optional.of(buf); } catch (Exception e) { Log.error("Error reading file: " + fileUuid, e); return Optional.empty(); @@ -346,357 +324,379 @@ public class DhfsFileServiceImpl implements DhfsFileService { }); } - private ByteString readChunk(String uuid) { - var chunkRead = jObjectManager.get(uuid).orElse(null); + private ByteString readChunk(JObjectKey uuid) { + var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null); if (chunkRead == null) { Log.error("Chunk requested not found: " + uuid); throw new StatusRuntimeException(Status.NOT_FOUND); } - return chunkRead.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (!(d instanceof ChunkData cd)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - return cd.getBytes(); - }); + return chunkRead.data(); } - private int getChunkSize(String uuid) { + private int getChunkSize(JObjectKey uuid) { return readChunk(uuid).size(); } - private void cleanupChunks(File f, Collection uuids) { + private void cleanupChunks(File f, Collection uuids) { // FIXME: - var inFile = useHashForChunks ? new HashSet<>(f.getChunks().values()) : Collections.emptySet(); - for (var cuuid : uuids) { - try { - if (inFile.contains(cuuid)) continue; - jObjectManager.get(cuuid) - .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, - (m, d, b, v) -> { - m.removeRef(f.getName()); - return null; - })); - } catch (Exception e) { - Log.error("Error when cleaning chunk " + cuuid, e); - } - } +// var inFile = useHashForChunks ? 
new HashSet<>(f.getChunks().values()) : Collections.emptySet(); +// for (var cuuid : uuids) { +// try { +// if (inFile.contains(cuuid)) continue; +// jObjectManager.get(cuuid) +// .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, +// (m, d, b, v) -> { +// m.removeRef(f.getName()); +// return null; +// })); +// } catch (Exception e) { +// Log.error("Error when cleaning chunk " + cuuid, e); +// } +// } } @Override - public Long write(String fileUuid, long offset, ByteString data) { + public Long write(JObjectKey fileUuid, long offset, ByteString data) { return jObjectTxManager.executeTx(() -> { if (offset < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); // FIXME: - var file = (JObject<File>) jObjectManager.get(fileUuid).orElse(null); + var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null); if (file == null) { - Log.error("File not found when trying to read: " + fileUuid); + Log.error("File not found when trying to write: " + fileUuid); return -1L; } - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - // FIXME: - if (!(file.getData() instanceof File)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + if (writeLogging) { + Log.info("Writing to file: " + file.key() + " size=" + size(fileUuid) + " " + + offset + " " + data.size()); + } - if (writeLogging) { - Log.info("Writing to file: " + file.getMeta().getName() + " size=" + size(fileUuid) + " " - + offset + " " + data.size()); - } + if (size(fileUuid) < offset) { + truncate(fileUuid, offset); + file = remoteTx.getData(File.class, fileUuid).orElse(null); + } - if (size(fileUuid) < offset) - truncate(fileUuid, offset); + Pair<JMapLongKey, JMapEntry<JMapLongKey>> first; + Pair<JMapLongKey, JMapEntry<JMapLongKey>> last; + Log.tracev("Getting last"); + try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(offset + data.size()))) { + last = it.hasNext() ? it.next() : null; + Log.tracev("Last: {0}", last); + } - // FIXME: Some kind of immutable interface? - var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks()); - var first = chunksAll.floorEntry(offset); - var last = chunksAll.lowerEntry(offset + data.size()); - NavigableMap<Long, String> removedChunks = new TreeMap<>(); + NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>(); - long start = 0; + long start = 0; - NavigableMap<Long, String> beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); - NavigableMap<Long, String> afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); - - if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) { - beforeFirst = chunksAll; - afterLast = Collections.emptyNavigableMap(); + try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) { + first = it.hasNext() ? 
it.next() : null; + Log.tracev("First: {0}", first); + boolean empty = last == null; + if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) { first = null; last = null; start = offset; - } else if (!chunksAll.isEmpty()) { - var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); - removedChunks.putAll(between); - start = first.getKey(); - } - - ByteString pendingWrites = ByteString.empty(); - - if (first != null && first.getKey() < offset) { - var chunkBytes = readChunk(first.getValue()); - pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey()))); - } - pendingWrites = pendingWrites.concat(data); - - if (last != null) { - var lchunkBytes = readChunk(last.getValue()); - if (last.getKey() + lchunkBytes.size() > offset + data.size()) { - var startInFile = offset + data.size(); - var startInChunk = startInFile - last.getKey(); - pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size())); + } else if (!empty) { + assert first != null; + removedChunks.put(first.getKey().key(), first.getValue().ref()); + while (it.hasNext() && it.peekNextKey().compareTo(last.getKey()) <= 0) { + var next = it.next(); + Log.tracev("Next: {0}", next); + removedChunks.put(next.getKey().key(), next.getValue().ref()); } + removedChunks.put(last.getKey().key(), last.getValue().ref()); + start = first.getKey().key(); } - - int combinedSize = pendingWrites.size(); - - if (targetChunkSize > 0) { - if (combinedSize < (targetChunkSize * writeMergeThreshold)) { - boolean leftDone = false; - boolean rightDone = false; - while (!leftDone && !rightDone) { - if (beforeFirst.isEmpty()) leftDone = true; - if (!beforeFirst.isEmpty() || !leftDone) { - var takeLeft = beforeFirst.lastEntry(); - - var cuuid = takeLeft.getValue(); - - if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { - leftDone = true; - continue; - } - - if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { - leftDone = true; - continue; - } - - // FIXME: (and test this) - beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false); - start = takeLeft.getKey(); - pendingWrites = readChunk(cuuid).concat(pendingWrites); - combinedSize += getChunkSize(cuuid); - removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); - } - if (afterLast.isEmpty()) rightDone = true; - if (!afterLast.isEmpty() && !rightDone) { - var takeRight = afterLast.firstEntry(); - - var cuuid = takeRight.getValue(); - - if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { - rightDone = true; - continue; - } - - if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { - rightDone = true; - continue; - } - - // FIXME: (and test this) - afterLast = afterLast.tailMap(takeRight.getKey(), false); - pendingWrites = pendingWrites.concat(readChunk(cuuid)); - combinedSize += getChunkSize(cuuid); - removedChunks.put(takeRight.getKey(), takeRight.getValue()); - } - } - } - } - - NavigableMap newChunks = new TreeMap<>(); - - { - int cur = 0; - while (cur < combinedSize) { - int end; - - if (targetChunkSize <= 0) - end = combinedSize; - else { - if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) { - end = Math.min(cur + targetChunkSize, combinedSize); - } else { - end = combinedSize; - } - } - - var thisChunk = pendingWrites.substring(cur, end); - - ChunkData newChunkData = createChunk(thisChunk); - //FIXME: - jObjectManager.put(newChunkData, 
Optional.of(file.getMeta().getName())); - newChunks.put(start, newChunkData.getName()); - - start += thisChunk.size(); - cur = end; - } - } - - file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks)); - - cleanupChunks(file.getData(), removedChunks.values()); - updateFileSize((JObject) file); - } finally { - file.rwUnlock(); } + +// NavigableMap beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); +// NavigableMap afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); + +// if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) { +// beforeFirst = chunksAll; +// afterLast = Collections.emptyNavigableMap(); +// first = null; +// last = null; +// start = offset; +// } else if (!chunksAll.isEmpty()) { +// var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); +// removedChunks.putAll(between); +// start = first.getKey(); +// } + + ByteString pendingWrites = ByteString.empty(); + + if (first != null && first.getKey().key() < offset) { + var chunkBytes = readChunk(first.getValue().ref()); + pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey().key()))); + } + pendingWrites = pendingWrites.concat(data); + + if (last != null) { + var lchunkBytes = readChunk(last.getValue().ref()); + if (last.getKey().key() + lchunkBytes.size() > offset + data.size()) { + var startInFile = offset + data.size(); + var startInChunk = startInFile - last.getKey().key(); + pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size())); + } + } + + int combinedSize = pendingWrites.size(); + + if (targetChunkSize > 0) { +// if (combinedSize < (targetChunkSize * writeMergeThreshold)) { +// boolean leftDone = false; +// boolean rightDone = false; +// while (!leftDone && !rightDone) { +// if (beforeFirst.isEmpty()) leftDone = true; +// if (!beforeFirst.isEmpty() || !leftDone) { +// var takeLeft = beforeFirst.lastEntry(); +// +// var cuuid = takeLeft.getValue(); +// +// if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { +// leftDone = true; +// continue; +// } +// +// if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { +// leftDone = true; +// continue; +// } +// +// // FIXME: (and test this) +// beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false); +// start = takeLeft.getKey(); +// pendingWrites = readChunk(cuuid).concat(pendingWrites); +// combinedSize += getChunkSize(cuuid); +// removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); +// } +// if (afterLast.isEmpty()) rightDone = true; +// if (!afterLast.isEmpty() && !rightDone) { +// var takeRight = afterLast.firstEntry(); +// +// var cuuid = takeRight.getValue(); +// +// if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { +// rightDone = true; +// continue; +// } +// +// if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { +// rightDone = true; +// continue; +// } +// +// // FIXME: (and test this) +// afterLast = afterLast.tailMap(takeRight.getKey(), false); +// pendingWrites = pendingWrites.concat(readChunk(cuuid)); +// combinedSize += getChunkSize(cuuid); +// removedChunks.put(takeRight.getKey(), takeRight.getValue()); +// } +// } +// } + } + + NavigableMap newChunks = new TreeMap<>(); + + { + int cur = 0; + while (cur < combinedSize) { + int end; + + if (targetChunkSize <= 0) 
+ end = combinedSize; + else { + if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) { + end = Math.min(cur + targetChunkSize, combinedSize); + } else { + end = combinedSize; + } + } + + var thisChunk = pendingWrites.substring(cur, end); + + ChunkData newChunkData = createChunk(thisChunk); + newChunks.put(start, newChunkData.key()); + + start += thisChunk.size(); + cur = end; + } + } + + for (var e : removedChunks.entrySet()) { + Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue()); + jMapHelper.delete(file, JMapLongKey.of(e.getKey())); + } + + for (var e : newChunks.entrySet()) { + Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue()); + jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue()); + } + + remoteTx.putData(file); + cleanupChunks(file, removedChunks.values()); + updateFileSize(file); + return (long) data.size(); }); } @Override - public Boolean truncate(String fileUuid, long length) { + public Boolean truncate(JObjectKey fileUuid, long length) { return jObjectTxManager.executeTx(() -> { if (length < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); - var file = (JObject) jObjectManager.get(fileUuid).orElse(null); + var file = remoteTx.getData(File.class, fileUuid).orElse(null); if (file == null) { - Log.error("File not found when trying to read: " + fileUuid); + Log.error("File not found when trying to write: " + fileUuid); return false; } if (length == 0) { - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - - var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.getData().getChunks())); - - file.mutate(new JMutator<>() { - long oldMtime; - - @Override - public boolean mutate(File object) { - oldMtime = object.getMtime(); - object.getChunks().clear(); - return true; - } - - @Override - public void revert(File object) { - object.setMtime(oldMtime); - object.getChunks().putAll(oldChunks); - } - }); - cleanupChunks(file.getData(), oldChunks.values()); - updateFileSize((JObject) file); - } catch (Exception e) { - Log.error("Error writing file chunks: " + fileUuid, e); - return false; - } finally { - file.rwUnlock(); - } - return true; - } - - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - - var curSize = size(fileUuid); - if (curSize == length) return true; - - var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks()); - NavigableMap removedChunks = new TreeMap<>(); - NavigableMap newChunks = new TreeMap<>(); - - if (curSize < length) { - long combinedSize = (length - curSize); - - long start = curSize; - - // Hack - HashMap zeroCache = new HashMap<>(); - - { - long cur = 0; - while (cur < combinedSize) { - long end; - - if (targetChunkSize <= 0) - end = combinedSize; - else { - if ((combinedSize - cur) > (targetChunkSize * 1.5)) { - end = cur + targetChunkSize; - } else { - end = combinedSize; - } - } - - if (!zeroCache.containsKey(end - cur)) - zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])); - - ChunkData newChunkData = createChunk(zeroCache.get(end - cur)); - //FIXME: - jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); - newChunks.put(start, newChunkData.getName()); - - start += newChunkData.getSize(); - cur = end; - } + try (var it = jMapHelper.getIterator(file, IteratorStart.GE, JMapLongKey.of(0))) { + while (it.hasNext()) { + var next = it.next(); + 
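+                    // Truncating to zero clears the file's chunk map entry by entry:
+                    // the iterator starts at the lowest chunk key (GE 0) and every
+                    // visited entry is deleted below, replacing the old path that
+                    // called getChunks().clear() on the mutable File object.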
jMapHelper.delete(file, next.getKey()); } - } else { - var tail = chunksAll.lowerEntry(length); - var afterTail = chunksAll.tailMap(tail.getKey(), false); - - removedChunks.put(tail.getKey(), tail.getValue()); - removedChunks.putAll(afterTail); - - var tailBytes = readChunk(tail.getValue()); - var newChunk = tailBytes.substring(0, (int) (length - tail.getKey())); - - ChunkData newChunkData = createChunk(newChunk); - //FIXME: - jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); - newChunks.put(tail.getKey(), newChunkData.getName()); } - - file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks)); - - cleanupChunks(file.getData(), removedChunks.values()); - updateFileSize((JObject) file); +// var oldChunks = file.chunks(); +// +// file = file.withChunks(TreePMap.empty()).withMTime(System.currentTimeMillis()); + remoteTx.putData(file); +// cleanupChunks(file, oldChunks.values()); + updateFileSize(file); return true; - } catch (Exception e) { - Log.error("Error reading file: " + fileUuid, e); - return false; - } finally { - file.rwUnlock(); } + + var curSize = size(fileUuid); + if (curSize == length) return true; + + NavigableMap removedChunks = new TreeMap<>(); + NavigableMap newChunks = new TreeMap<>(); + + if (curSize < length) { + long combinedSize = (length - curSize); + + long start = curSize; + + // Hack + HashMap zeroCache = new HashMap<>(); + + { + long cur = 0; + while (cur < combinedSize) { + long end; + + if (targetChunkSize <= 0) + end = combinedSize; + else { + if ((combinedSize - cur) > (targetChunkSize * 1.5)) { + end = cur + targetChunkSize; + } else { + end = combinedSize; + } + } + + if (!zeroCache.containsKey(end - cur)) + zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])); + + ChunkData newChunkData = createChunk(zeroCache.get(end - cur)); + newChunks.put(start, newChunkData.key()); + + start += newChunkData.data().size(); + cur = end; + } + } + } else { +// Pair> first; + Pair> last; + try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(length))) { + last = it.hasNext() ? it.next() : null; + while (it.hasNext()) { + var next = it.next(); + removedChunks.put(next.getKey().key(), next.getValue().ref()); + } + } + removedChunks.put(last.getKey().key(), last.getValue().ref()); +// +// NavigableMap removedChunks = new TreeMap<>(); +// +// long start = 0; +// +// try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) { +// first = it.hasNext() ? 
it.next() : null; +// boolean empty = last == null; +// if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) { +// first = null; +// last = null; +// start = offset; +// } else if (!empty) { +// assert first != null; +// removedChunks.put(first.getKey().key(), first.getValue().ref()); +// while (it.hasNext() && it.peekNextKey() != last.getKey()) { +// var next = it.next(); +// removedChunks.put(next.getKey().key(), next.getValue().ref()); +// } +// removedChunks.put(last.getKey().key(), last.getValue().ref()); +// } +// } +// +// var tail = chunksAll.lowerEntry(length); +// var afterTail = chunksAll.tailMap(tail.getKey(), false); +// +// removedChunks.put(tail.getKey(), tail.getValue()); +// removedChunks.putAll(afterTail); + + var tailBytes = readChunk(last.getValue().ref()); + var newChunk = tailBytes.substring(0, (int) (length - last.getKey().key())); + + ChunkData newChunkData = createChunk(newChunk); + newChunks.put(last.getKey().key(), newChunkData.key()); + } + +// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); + + for (var e : removedChunks.entrySet()) { + Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue()); + jMapHelper.delete(file, JMapLongKey.of(e.getKey())); + } + + for (var e : newChunks.entrySet()) { + Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue()); + jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue()); + } + + remoteTx.putData(file); + cleanupChunks(file, removedChunks.values()); + updateFileSize(file); + return true; }); } @Override - public String readlink(String uuid) { + public String readlink(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { return readlinkBS(uuid).toStringUtf8(); }); } @Override - public ByteString readlinkBS(String uuid) { + public ByteString readlinkBS(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var fileOpt = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); - - return fileOpt.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> { - if (!(fileData instanceof File)) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - } - - if (!((File) fileData).isSymlink()) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Not a symlink: " + uuid)); - - return read(uuid, 0, Math.toIntExact(size(uuid))).get(); - }); + var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); + return read(uuid, 0, Math.toIntExact(size(uuid))).get(); }); } @Override - public String symlink(String oldpath, String newpath) { + public JObjectKey symlink(String oldpath, String newpath) { return jObjectTxManager.executeTx(() -> { Path path = Path.of(newpath); var parent = getDirEntry(path.getParent().toString()); @@ -708,107 +708,58 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); - File f = new File(fuuid, 0, true); - var newNodeId = _tree.getNewNodeId(); ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8))); + File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true, 0); + jMapHelper.put(f, JMapLongKey.of(0), 
newChunkData.key()); - f.getChunks().put(0L, newChunkData.getName()); + updateFileSize(f); - jObjectManager.put(newChunkData, Optional.of(f.getName())); - var newFile = jObjectManager.putLocked(f, Optional.of(newNodeId)); - try { - updateFileSize(newFile); - } finally { - newFile.rwUnlock(); - } - - _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId); - return f.getName(); + getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId()); + return f.key(); }); } @Override - public Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs) { + public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) { return jObjectTxManager.executeTx(() -> { - var file = jObjectManager.get(fileUuid).orElseThrow( + var file = remoteTx.getData(File.class, fileUuid).orElseThrow( () -> new StatusRuntimeException(Status.NOT_FOUND.withDescription( "File not found for setTimes: " + fileUuid)) ); - file.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, fileData, bump, i) -> { - if (fileData instanceof JKleppmannTreeNode) return null; // FIXME: - if (!(fileData instanceof FsNode fd)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - bump.apply(); - fd.setMtime(mtimeMs); - return null; - }); - + remoteTx.putData(file.withCTime(atimeMs).withMTime(mtimeMs)); return true; }); } @Override - public void updateFileSize(JObject file) { + public void updateFileSize(File file) { jObjectTxManager.executeTx(() -> { - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - if (!(file.getData() instanceof File fd)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + long realSize = 0; - long realSize = 0; + Pair> last; + Log.tracev("Getting last"); + try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) { + last = it.hasNext() ? 
it.next() : null; + } - var last = fd.getChunks().lastEntry(); - if (last != null) { - var lastSize = getChunkSize(last.getValue()); - realSize = last.getKey() + lastSize; - } + if (last != null) { + realSize = last.getKey().key() + getChunkSize(last.getValue().ref()); + } - if (realSize != fd.getSize()) { - long finalRealSize = realSize; - file.mutate(new JMutator() { - long oldSize; - - @Override - public boolean mutate(File object) { - oldSize = object.getSize(); - object.setSize(finalRealSize); - return true; - } - - @Override - public void revert(File object) { - object.setSize(oldSize); - } - }); - } - } catch (Exception e) { - Log.error("Error updating file size: " + file.getMeta().getName(), e); - } finally { - file.rwUnlock(); + if (realSize != file.size()) { + remoteTx.putData(file.withSize(realSize)); } }); } @Override - public Long size(String uuid) { + public Long size(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var read = jObjectManager.get(uuid) + var read = remoteTx.getData(File.class, uuid) .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); - try { - return read.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (fsNodeData, fileData) -> { - if (!(fileData instanceof File fd)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - return fd.getSize(); - }); - } catch (Exception e) { - Log.error("Error reading file: " + uuid, e); - return -1L; - } + return read.size(); }); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java index 0fa8ee29..34111b34 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java @@ -5,7 +5,6 @@ import com.sun.security.auth.module.UnixSystem; import com.usatiuk.dhfs.files.service.DhfsFileService; import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException; import com.usatiuk.dhfs.files.service.GetattrRes; -import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; import com.usatiuk.kleppmanntree.AlreadyExistsException; import io.grpc.Status; @@ -38,8 +37,6 @@ import static jnr.posix.FileStat.*; public class DhfsFuse extends FuseStubFS { private static final int blksize = 1048576; private static final int iosize = 1048576; - @Inject - ObjectPersistentStore persistentStore; // FIXME? 
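    // With the ObjectPersistentStore injection removed above, statfs() below no
    // longer queries real backing-store capacity; it reports fixed 1 GiB
    // placeholder totals instead (marked FIXME further down in this diff).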
@ConfigProperty(name = "dhfs.fuse.root") String root; @ConfigProperty(name = "dhfs.fuse.enabled") @@ -100,15 +97,16 @@ public class DhfsFuse extends FuseStubFS { try { stbuf.f_frsize.set(blksize); stbuf.f_bsize.set(blksize); - stbuf.f_blocks.set(persistentStore.getTotalSpace() / blksize); // total data blocks in file system - stbuf.f_bfree.set(persistentStore.getFreeSpace() / blksize); // free blocks in fs - stbuf.f_bavail.set(persistentStore.getUsableSpace() / blksize); // avail blocks in fs + // FIXME: + stbuf.f_blocks.set(1024 * 1024 * 1024 / blksize); // total data blocks in file system + stbuf.f_bfree.set(1024 * 1024 * 1024 / blksize); // free blocks in fs + stbuf.f_bavail.set(1024 * 1024 * 1024 / blksize); // avail blocks in fs stbuf.f_files.set(1000); //FIXME: stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME: stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME: stbuf.f_namemax.set(2048); return super.statfs(path, stbuf); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When statfs " + path, e); return -ErrorCodes.EIO(); } @@ -149,9 +147,6 @@ public class DhfsFuse extends FuseStubFS { stat.st_atim.tv_sec.set(found.get().mtime() / 1000); stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000); stat.st_blksize.set(blksize); - } catch (Exception e) { - Log.error("When getattr " + path, e); - return -ErrorCodes.EIO(); } catch (Throwable e) { Log.error("When getattr " + path, e); return -ErrorCodes.EIO(); @@ -170,7 +165,7 @@ public class DhfsFuse extends FuseStubFS { timespec[1].tv_sec.get() * 1000); if (!res) return -ErrorCodes.EINVAL(); else return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When utimens " + path, e); return -ErrorCodes.EIO(); } @@ -181,7 +176,7 @@ public class DhfsFuse extends FuseStubFS { try { if (fileService.open(path).isEmpty()) return -ErrorCodes.ENOENT(); return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When open " + path, e); return -ErrorCodes.EIO(); } @@ -199,7 +194,7 @@ public class DhfsFuse extends FuseStubFS { if (read.isEmpty()) return 0; UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size)); return read.get().size(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When reading " + path, e); return -ErrorCodes.EIO(); } @@ -213,15 +208,19 @@ public class DhfsFuse extends FuseStubFS { if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); var buffer = UninitializedByteBuffer.allocateUninitialized((int) size); - jnrPtrByteOutputAccessors.getUnsafe().copyMemory( - buf.address(), - jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer), - size - ); + if (buffer.isDirect()) { + jnrPtrByteOutputAccessors.getUnsafe().copyMemory( + buf.address(), + jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer), + size + ); + } else { + buf.get(0, buffer.array(), 0, (int) size); + } var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer)); return written.intValue(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When writing " + path, e); return -ErrorCodes.EIO(); } @@ -233,7 +232,7 @@ public class DhfsFuse extends FuseStubFS { var ret = fileService.create(path, mode); if (ret.isEmpty()) return -ErrorCodes.ENOSPC(); else return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When creating " + path, e); return -ErrorCodes.EIO(); } @@ -246,7 +245,7 @@ public class DhfsFuse extends FuseStubFS { return 0; } catch (AlreadyExistsException aex) { return 
-ErrorCodes.EEXIST(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When creating dir " + path, e); return -ErrorCodes.EIO(); } @@ -259,7 +258,7 @@ public class DhfsFuse extends FuseStubFS { return 0; } catch (DirectoryNotEmptyException ex) { return -ErrorCodes.ENOTEMPTY(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When removing dir " + path, e); return -ErrorCodes.EIO(); } @@ -271,7 +270,7 @@ public class DhfsFuse extends FuseStubFS { var ret = fileService.rename(path, newName); if (!ret) return -ErrorCodes.ENOENT(); else return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When renaming " + path, e); return -ErrorCodes.EIO(); } @@ -283,7 +282,7 @@ public class DhfsFuse extends FuseStubFS { try { fileService.unlink(path); return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When unlinking " + path, e); return -ErrorCodes.EIO(); } @@ -301,7 +300,7 @@ public class DhfsFuse extends FuseStubFS { return 0; else return -ErrorCodes.ENOSPC(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When truncating " + path, e); return -ErrorCodes.EIO(); } @@ -315,7 +314,7 @@ public class DhfsFuse extends FuseStubFS { var ret = fileService.chmod(fileOpt.get(), mode); if (ret) return 0; else return -ErrorCodes.EINVAL(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When chmod " + path, e); return -ErrorCodes.EIO(); } @@ -341,7 +340,7 @@ public class DhfsFuse extends FuseStubFS { } return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When readdir " + path, e); return -ErrorCodes.EIO(); } @@ -359,7 +358,7 @@ public class DhfsFuse extends FuseStubFS { UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size)); buf.putByte(Math.min(size - 1, read.size()), (byte) 0); return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When reading " + path, e); return -ErrorCodes.EIO(); } @@ -371,7 +370,7 @@ public class DhfsFuse extends FuseStubFS { var fileOpt = fileService.open(path); if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When chown " + path, e); return -ErrorCodes.EIO(); } @@ -383,7 +382,7 @@ public class DhfsFuse extends FuseStubFS { var ret = fileService.symlink(oldpath, newpath); if (ret == null) return -ErrorCodes.EEXIST(); else return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When creating " + newpath, e); return -ErrorCodes.EIO(); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java index d2790516..51be0f7a 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java @@ -51,7 +51,7 @@ public class JnrPtrByteOutput extends ByteOutput { var out = _backing.address() + _pos; _accessors.getUnsafe().copyMemory(addr, out, rem); } else { - throw new UnsupportedOperationException(); + _backing.put(_pos, value.array(), value.arrayOffset() + value.position(), rem); } _pos += rem; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java index 78cc8ff4..6ec005d7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java +++ 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java
@@ -3,16 +3,13 @@ package com.usatiuk.dhfs.fuse;
 import jakarta.inject.Singleton;
 import jdk.internal.access.JavaNioAccess;
 import jdk.internal.access.SharedSecrets;
-import lombok.Getter;
 import sun.misc.Unsafe;
 
 import java.lang.reflect.Field;
 
 @Singleton
 class JnrPtrByteOutputAccessors {
-    @Getter
     JavaNioAccess _nioAccess;
-    @Getter
     Unsafe _unsafe;
 
     JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException {
@@ -21,4 +18,12 @@ class JnrPtrByteOutputAccessors {
         f.setAccessible(true);
         _unsafe = (Unsafe) f.get(null);
     }
+
+    public JavaNioAccess getNioAccess() {
+        return _nioAccess;
+    }
+
+    public Unsafe getUnsafe() {
+        return _unsafe;
+    }
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ConflictResolver.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ConflictResolver.java
new file mode 100644
index 00000000..f3b1acc7
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ConflictResolver.java
@@ -0,0 +1,5 @@
+package com.usatiuk.dhfs.objects;
+
+public interface ConflictResolver {
+    void resolve(PeerId fromPeer, RemoteObjectMeta ours, RemoteObjectMeta theirs);
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java
new file mode 100644
index 00000000..6910cb1a
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java
@@ -0,0 +1,65 @@
+package com.usatiuk.dhfs.objects;
+
+import com.usatiuk.dhfs.objects.transaction.Transaction;
+import io.quarkus.logging.Log;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+@ApplicationScoped
+public class DeleterTxHook implements PreCommitTxHook {
+    @Inject
+    Transaction curTx;
+    @Inject
+    RemoteObjectDeleter remoteObjectDeleter;
+
+    private boolean canDelete(JDataRefcounted data) {
+        return !data.frozen() && data.refsFrom().isEmpty();
+    }
+
+    @Override
+    public void onChange(JObjectKey key, JData old, JData cur) {
+        if (!(cur instanceof JDataRefcounted refCur)) {
+            return;
+        }
+        if (canDelete(refCur)) {
+            if (refCur instanceof RemoteObjectMeta ro) {
+                curTx.onCommit(() -> remoteObjectDeleter.putDeletionCandidate(ro));
+                return;
+            }
+            Log.trace("Deleting object on change: " + key);
+            curTx.delete(key);
+        }
+    }
+
+    @Override
+    public void onCreate(JObjectKey key, JData cur) {
+        if (!(cur instanceof JDataRefcounted refCur)) {
+            return;
+        }
+
+        if (canDelete(refCur)) {
+            if (refCur instanceof RemoteObjectMeta ro) {
+                curTx.onCommit(() -> remoteObjectDeleter.putDeletionCandidate(ro));
+                return;
+            }
+            Log.warn("Deleting object on creation: " + key);
+            curTx.delete(key);
+        }
+    }
+
+    @Override
+    public void onDelete(JObjectKey key, JData cur) {
+        if (!(cur instanceof JDataRefcounted refCur)) {
+            return;
+        }
+
+        if (!canDelete(refCur)) {
+            throw new IllegalStateException("Deleting object with refs: " + key);
+        }
+    }
+
+    @Override
+    public int getPriority() {
+        return 200;
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java
new file mode 100644
index 00000000..09293ebb
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java
@@ -0,0 +1,20 @@
+package com.usatiuk.dhfs.objects;
+
+import org.pcollections.PCollection;
+
+import java.util.Collection;
+import java.util.List;
+
+public interface JDataRefcounted extends JData {
+    PCollection<JObjectKey> refsFrom();
+
+    JDataRefcounted withRefsFrom(PCollection<JObjectKey> refs);
+
+    boolean frozen();
+
+    JDataRefcounted withFrozen(boolean frozen);
+
+    default Collection<JObjectKey> collectRefsTo() {
+        return List.of();
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java
new file mode 100644
index 00000000..2a39f186
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java
@@ -0,0 +1,17 @@
+package com.usatiuk.dhfs.objects;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.List;
+
+public interface JDataRemote extends Serializable {
+    JObjectKey key();
+
+    default int estimateSize() {
+        return 100;
+    }
+
+    default Collection<JObjectKey> collectRefsTo() {
+        return List.of();
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java
new file mode 100644
index 00000000..339f2a53
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java
@@ -0,0 +1,23 @@
+package com.usatiuk.dhfs.objects;
+
+import java.io.Serializable;
+
+public record PeerId(JObjectKey id) implements Serializable, Comparable<PeerId> {
+    public static PeerId of(String id) {
+        return new PeerId(JObjectKey.of(id));
+    }
+
+    @Override
+    public String toString() {
+        return id.toString();
+    }
+
+    public JObjectKey toJObjectKey() {
+        return JObjectKey.of(id.toString());
+    }
+
+    @Override
+    public int compareTo(PeerId o) {
+        return id.compareTo(o.id);
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java
new file mode 100644
index 00000000..6826b09f
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java
@@ -0,0 +1,6 @@
+package com.usatiuk.dhfs.objects;
+
+import org.pcollections.PMap;
+
+public record ReceivedObject(PMap<PeerId, Long> changelog, JDataRemote data) {
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java
new file mode 100644
index 00000000..b6f515cd
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java
@@ -0,0 +1,109 @@
+package com.usatiuk.dhfs.objects;
+
+import com.usatiuk.dhfs.objects.jmap.JMapEntry;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
+import io.quarkus.logging.Log;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+@ApplicationScoped
+public class RefcounterTxHook implements PreCommitTxHook {
+    @Inject
+    Transaction curTx;
+
+    private JDataRefcounted getRef(JObjectKey key) {
+        var found = curTx.get(JDataRefcounted.class, key).orElse(null);
+
+        if (found != null) {
+            return found;
+        }
+
+        return new RemoteObjectMeta(key);
+    }
+
+    @Override
+    public void onChange(JObjectKey key, JData old, JData cur) {
+        if (cur instanceof JMapEntry me) {
+            var oldMe = (JMapEntry) old;
+            var oldRef = oldMe.ref();
+            var curRef = me.ref();
+            var referencedOld = getRef(oldRef);
+            curTx.put(referencedOld.withRefsFrom(referencedOld.refsFrom().minus(key)));
+            var referencedCur = getRef(curRef);
+            curTx.put(referencedCur.withRefsFrom(referencedCur.refsFrom().plus(key)));
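+            // A JMapEntry change is a reference move: the previous target drops
+            // this entry from its refsFrom set and the new target gains it,
+            // keeping reverse references consistent for the deletion hooks.
+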
Log.tracev("Removed ref from {0} to {1}, added ref to {2}", key, oldRef, curRef); + return; + } + + if (!(cur instanceof JDataRefcounted refCur)) { + return; + } + var refOld = (JDataRefcounted) old; + + var curRefs = refCur.collectRefsTo(); + var oldRefs = refOld.collectRefsTo(); + + for (var curRef : curRefs) { + if (!oldRefs.contains(curRef)) { + var referenced = getRef(curRef); + curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); + Log.tracev("Added ref from {0} to {1}", key, curRef); + } + } + + for (var oldRef : oldRefs) { + if (!curRefs.contains(oldRef)) { + var referenced = getRef(oldRef); + curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); + Log.tracev("Removed ref from {0} to {1}", key, oldRef); + } + } + } + + @Override + public void onCreate(JObjectKey key, JData cur) { + if (cur instanceof JMapEntry me) { + var curRef = me.ref(); + var referencedCur = getRef(curRef); + curTx.put(referencedCur.withRefsFrom(referencedCur.refsFrom().plus(key))); + Log.tracev("Added ref from {0} to {1}", key, curRef); + return; + } + + if (!(cur instanceof JDataRefcounted refCur)) { + return; + } + + for (var newRef : refCur.collectRefsTo()) { + var referenced = getRef(newRef); + curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); + Log.tracev("Added ref from {0} to {1}", key, newRef); + } + } + + @Override + public void onDelete(JObjectKey key, JData cur) { + if (cur instanceof JMapEntry me) { + var oldRef = me.ref(); + var referencedOld = getRef(oldRef); + curTx.put(referencedOld.withRefsFrom(referencedOld.refsFrom().minus(key))); + Log.tracev("Removed ref from {0} to {1}", key, oldRef); + return; + } + + if (!(cur instanceof JDataRefcounted refCur)) { + return; + } + + for (var removedRef : refCur.collectRefsTo()) { + var referenced = getRef(removedRef); + curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); + Log.tracev("Removed ref from {0} to {1}", key, removedRef); + } + } + + @Override + public int getPriority() { + return 100; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java new file mode 100644 index 00000000..47100484 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java @@ -0,0 +1,49 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; +import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +@ApplicationScoped +public class RemoteObjPusherTxHook implements PreCommitTxHook { + @Inject + Transaction curTx; + @Inject + InvalidationQueueService invalidationQueueService; + + @Override + public void onChange(JObjectKey key, JData old, JData cur) { + boolean invalidate = switch (cur) { + case RemoteObjectMeta remote -> !remote.changelog().equals(((RemoteObjectMeta) old).changelog()); + case JKleppmannTreePersistentData pd -> !pd.queues().equals(((JKleppmannTreePersistentData) old).queues()); + default -> false; + }; + + if (invalidate) { + invalidationQueueService.pushInvalidationToAll(cur.key()); + } + } + + @Override + public void onCreate(JObjectKey key, JData cur) { + if (!(cur instanceof RemoteObjectMeta remote)) { + return; + } + + 
invalidationQueueService.pushInvalidationToAll(remote.key()); + } + + @Override + public void onDelete(JObjectKey key, JData cur) { + if (!(cur instanceof RemoteObjectMeta remote)) { + return; + } + } + + @Override + public int getPriority() { + return 100; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDataWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDataWrapper.java new file mode 100644 index 00000000..12877b9b --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDataWrapper.java @@ -0,0 +1,43 @@ +package com.usatiuk.dhfs.objects; + +import org.pcollections.HashTreePSet; +import org.pcollections.PCollection; + +import java.util.Collection; + +public record RemoteObjectDataWrapper(PCollection refsFrom, + boolean frozen, + T data) implements JDataRefcounted { + public RemoteObjectDataWrapper(T data) { + this(HashTreePSet.empty(), false, data); + } + + @Override + public RemoteObjectDataWrapper withRefsFrom(PCollection refs) { + return new RemoteObjectDataWrapper<>(refs, frozen, data); + } + + @Override + public RemoteObjectDataWrapper withFrozen(boolean frozen) { + return new RemoteObjectDataWrapper<>(refsFrom, frozen, data); + } + + public RemoteObjectDataWrapper withData(T data) { + return new RemoteObjectDataWrapper<>(refsFrom, frozen, data); + } + + @Override + public JObjectKey key() { + return RemoteObjectMeta.ofDataKey(data.key()); + } + + @Override + public Collection collectRefsTo() { + return data.collectRefsTo(); + } + + @Override + public int estimateSize() { + return data.estimateSize(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java new file mode 100644 index 00000000..7c7730f7 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java @@ -0,0 +1,241 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfo; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +@ApplicationScoped +public class RemoteObjectDeleter { + private final HashSetDelayedBlockingQueue _quickCandidates = new HashSetDelayedBlockingQueue<>(0); + private final HashSetDelayedBlockingQueue _candidates; + private final HashSetDelayedBlockingQueue _canDeleteRetries; + private final HashSet _movablesInProcessing = new HashSet<>(); + + @Inject + TransactionManager txm; + @Inject + Transaction curTx; + @Inject + RemoteTransaction remoteTx; + @Inject + PeerInfoService peerInfoService; + @Inject + 
RemoteObjectServiceClient remoteObjectServiceClient; + + @ConfigProperty(name = "dhfs.objects.move-processor.threads") + int moveProcessorThreads; + @ConfigProperty(name = "dhfs.objects.ref-processor.threads") + int refProcessorThreads; + @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay") + long canDeleteRetryDelay; + + private ExecutorService _movableProcessorExecutorService; + private ExecutorService _refProcessorExecutorService; + + public RemoteObjectDeleter(@ConfigProperty(name = "dhfs.objects.deletion.delay") long deletionDelay, + @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay") long canDeleteRetryDelay) { + _candidates = new HashSetDelayedBlockingQueue<>(deletionDelay); + _canDeleteRetries = new HashSetDelayedBlockingQueue<>(canDeleteRetryDelay); + } + + void init(@Observes @Priority(200) StartupEvent event) throws IOException { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("move-proc-%d") + .build(); + _movableProcessorExecutorService = Executors.newFixedThreadPool(moveProcessorThreads, factory); + + BasicThreadFactory factoryRef = new BasicThreadFactory.Builder() + .namingPattern("ref-proc-%d") + .build(); + _refProcessorExecutorService = Executors.newFixedThreadPool(refProcessorThreads, factoryRef); + for (int i = 0; i < refProcessorThreads; i++) { + _refProcessorExecutorService.submit(this::refProcessor); + } + + // Continue GC from last shutdown + //FIXME +// executorService.submit(() -> +// jObjectManager.findAll().forEach(n -> { +// jObjectManager.get(n).ifPresent(o -> o.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { +// return null; +// })); +// })); + } + + void shutdown(@Observes @Priority(800) ShutdownEvent event) throws InterruptedException { + _refProcessorExecutorService.shutdownNow(); + if (!_refProcessorExecutorService.awaitTermination(30, TimeUnit.SECONDS)) { + Log.error("Refcounting threads didn't exit in 30 seconds"); + } + } + +// public void putQuickDeletionCandidate(JObjectKey obj) { +// _quickCandidates.add(obj); +// } + + public void putDeletionCandidate(RemoteObjectMeta obj) { + if (!obj.seen()) { + if (_quickCandidates.add(obj.key())) + Log.debug("Quick deletion candidate: " + obj.key()); + return; + } + if (_candidates.add(obj.key())) + Log.debug("Deletion candidate: " + obj.key()); + } + + private void asyncProcessMovable(JObjectKey objName) { + synchronized (_movablesInProcessing) { + if (_movablesInProcessing.contains(objName)) return; + _movablesInProcessing.add(objName); + } + Log.debugv("Async processing of remote obj del: {0}", objName); + + _movableProcessorExecutorService.submit(() -> { + boolean delay = true; + try { + delay = txm.run(() -> { + Log.debugv("Starting async processing of remote obj del: {0}", objName); + RemoteObjectMeta target = curTx.get(RemoteObjectMeta.class, objName).orElse(null); + if (target == null) return true; + if (!canDelete(target)) return true; + + if (canDeleteImmediately(target)) { + Log.debugv("Async processing of remote obj del: immediate {0}", objName); + curTx.delete(objName); + return true; + } + + var knownHosts = peerInfoService.getPeersNoSelf(); + RemoteObjectMeta finalTarget = target; + List missing = knownHosts.stream() + .map(PeerInfo::id) + .filter(id -> !finalTarget.confirmedDeletes().contains(id)).toList(); + + var ret = remoteObjectServiceClient.canDelete(missing, objName, target.refsFrom()); + + long ok = 0; + + for (var r : ret) { + if (!r.getValue().getDeletionCandidate()) { +// for (var 
rr : r.getReferrersList()) +// autoSyncProcessor.add(rr); + } else { + target = target.withConfirmedDeletes(target.confirmedDeletes().plus(r.getKey())); + ok++; + } + } + + curTx.put(target); + + if (ok != missing.size()) { + Log.debugv("Delaying deletion check of {0}", objName); + return true; + } else { + assert canDeleteImmediately(target); + Log.debugv("Async processing of remote obj del: after query {0}", objName); + curTx.delete(objName); + return false; + } + }); + } finally { + synchronized (_movablesInProcessing) { + _movablesInProcessing.remove(objName); + if (!delay) + _candidates.add(objName); + else + _canDeleteRetries.add(objName); + } + } + }); + } + + // FIXME: + private boolean canDelete(JDataRefcounted obj) { + return obj.refsFrom().isEmpty() && !obj.frozen(); + } + + // Returns true if the object can be deleted + private boolean canDeleteImmediately(RemoteObjectMeta obj) { + if (!obj.seen()) + return true; + + var knownHosts = peerInfoService.getPeersNoSelf(); + boolean missing = false; + for (var x : knownHosts) { + if (!obj.confirmedDeletes().contains(x.id())) { + missing = true; + break; + } + } + return !missing; + } + + private void refProcessor() { + while (true) { + try { + while (!Thread.interrupted()) { + JObjectKey next = null; + JObjectKey nextQuick = null; + + while (next == null && nextQuick == null) { + nextQuick = _quickCandidates.tryGet(); + + if (nextQuick != null) break; + + next = _canDeleteRetries.tryGet(); + if (next == null) + next = _candidates.tryGet(); + if (next == null) + nextQuick = _quickCandidates.get(canDeleteRetryDelay); + } + + Stream.of(next, nextQuick).filter(Objects::nonNull).forEach(realNext -> { + Log.debugv("Processing remote object deletion candidate: {0}", realNext); + var deleted = txm.run(() -> { + RemoteObjectMeta target = curTx.get(RemoteObjectMeta.class, realNext).orElse(null); + if (target == null) return true; + + if (!canDelete(target)) return true; + + if (canDeleteImmediately(target)) { + Log.debugv("Immediate deletion of: {0}", realNext); + curTx.delete(realNext); + return true; + } + + return false; + }); + if (!deleted) + asyncProcessMovable(realNext); + }); + } + } catch (InterruptedException ignored) { + return; + } catch (Throwable error) { + Log.error("Exception in refcounter thread", error); + } + Log.info("JObject Refcounter thread exiting"); + } + } + +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java new file mode 100644 index 00000000..b2d9ab6d --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java @@ -0,0 +1,103 @@ +package com.usatiuk.dhfs.objects; + +import org.pcollections.*; + +import java.util.Collection; +import java.util.List; + +public record RemoteObjectMeta(PCollection refsFrom, boolean frozen, + JObjectKey key, + PMap knownRemoteVersions, + Class knownType, + PSet confirmedDeletes, + boolean seen, + PMap changelog, + boolean hasLocalData) implements JDataRefcounted { + // Self put + public RemoteObjectMeta(JDataRemote data, PeerId initialPeer) { + this(HashTreePSet.empty(), false, + data.key(), HashTreePMap.empty(), data.getClass(), HashTreePSet.empty(), false, + HashTreePMap.empty().plus(initialPeer, 1L), + true); + } + + public RemoteObjectMeta(JObjectKey key, PMap remoteChangelog) { + this(HashTreePSet.empty(), false, + key, HashTreePMap.empty(), JDataRemote.class, HashTreePSet.empty(), true, + remoteChangelog, + false); + } 
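+
+    // The changelog is effectively a per-peer version vector: each writer bumps
+    // only its own entry, and versionSum() below collapses it into a total so
+    // replicas can compare object freshness cheaply. Hypothetical example:
+    //   changelog = {peerA=3, peerB=1}  ->  versionSum() == 4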
+ + public RemoteObjectMeta(JObjectKey key) { + this(HashTreePSet.empty(), false, + key, HashTreePMap.empty(), JDataRemote.class, HashTreePSet.empty(), true, + TreePMap.empty(), + false); + } + + @Override + public JObjectKey key() { + return ofMetaKey(key); + } + + public static JObjectKey ofMetaKey(JObjectKey key) { + return key; + } + + public static JObjectKey ofDataKey(JObjectKey key) { + return JObjectKey.of("data_" + key.name()); + } + + public JObjectKey dataKey() { + return ofDataKey(key); + } + + @Override + public RemoteObjectMeta withRefsFrom(PCollection refs) { + return new RemoteObjectMeta(refs, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); + } + + @Override + public RemoteObjectMeta withFrozen(boolean frozen) { + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); + } + + public RemoteObjectMeta withKnownRemoteVersions(PMap knownRemoteVersions) { + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); + } + + public RemoteObjectMeta withKnownType(Class knownType) { + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); + } + + public RemoteObjectMeta withConfirmedDeletes(PSet confirmedDeletes) { + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); + } + + public RemoteObjectMeta withSeen(boolean seen) { + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); + } + + public RemoteObjectMeta withChangelog(PMap changelog) { + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); + } + + public RemoteObjectMeta withHaveLocal(boolean haveLocal) { + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + } + + public long versionSum() { + return changelog.values().stream().mapToLong(Long::longValue).sum(); + } + + @Override + public Collection collectRefsTo() { + if (hasLocalData) return List.of(dataKey()); + return List.of(); + } + + @Override + public int estimateSize() { + return 1000; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java new file mode 100644 index 00000000..6e48ea94 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java @@ -0,0 +1,119 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; +import com.usatiuk.dhfs.objects.repository.SyncHandler; +import com.usatiuk.dhfs.objects.transaction.LockingStrategy; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.mutable.MutableObject; + +import java.util.Optional; + +@ApplicationScoped +public class RemoteTransaction { + @Inject + Transaction curTx; + @Inject + RemoteObjectServiceClient remoteObjectServiceClient; + @Inject + SyncHandler syncHandler; + @Inject + 
PersistentPeerDataService persistentPeerDataService; + + private Optional> tryDownloadRemote(RemoteObjectMeta obj) { + MutableObject> success = new MutableObject<>(null); + + try { + remoteObjectServiceClient.getObject(obj.key(), rcv -> { + if (!obj.knownType().isInstance(rcv.getRight().data())) + throw new IllegalStateException("Object type mismatch: " + obj.knownType() + " vs " + rcv.getRight().data().getClass()); + + syncHandler.handleRemoteUpdate(rcv.getLeft(), obj.key(), rcv.getRight().changelog(), rcv.getRight().data()); + + var now = curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(obj.key())).orElse(null); + assert now != null; + + if (!now.hasLocalData()) + return false; + + var gotData = curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(obj.key())).orElse(null); + assert gotData != null; + + success.setValue(gotData); + return true; + }); + } catch (Exception e) { + Log.error("Failed to download object " + obj.key(), e); + return Optional.empty(); + } + + return Optional.of(success.getValue()); + } + + @SuppressWarnings("unchecked") + private Optional getData(Class type, JObjectKey key, LockingStrategy strategy, boolean tryRequest) { + return curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(key), strategy) + .flatMap(obj -> { + if (obj.hasLocalData()) { + var realData = curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(key), strategy).orElse(null); + if (realData == null) + throw new IllegalStateException("Local data not found for " + key); // TODO: Race + if (!type.isInstance(realData.data())) + throw new IllegalStateException("Object type mismatch: " + realData.data().getClass() + " vs " + type); + return Optional.of((T) realData.data()); + } + if (!tryRequest) + return Optional.empty(); + return tryDownloadRemote(obj).map(wrapper -> (T) wrapper.data()); + }); + } + + public Optional getMeta(JObjectKey key, LockingStrategy strategy) { + return curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(key), strategy); + } + + public void putData(T obj) { + var curMeta = getMeta(obj.key()).orElse(null); + + if (curMeta == null) { + curTx.put(new RemoteObjectMeta(obj, persistentPeerDataService.getSelfUuid())); + curTx.put(new RemoteObjectDataWrapper<>(obj)); + return; + } + +// if (cur.data() != null && cur.data().equals(obj)) +// return; + if (!curMeta.knownType().isAssignableFrom(obj.getClass())) + throw new IllegalStateException("Object type mismatch: " + curMeta.knownType() + " vs " + obj.getClass()); + var newMeta = curMeta; + newMeta = newMeta.withChangelog(newMeta.changelog().plus(persistentPeerDataService.getSelfUuid(), + newMeta.changelog().get(persistentPeerDataService.getSelfUuid()) + 1)); + curTx.put(newMeta); + var newData = curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(obj.key())) + .map(w -> w.withData(obj)).orElse(new RemoteObjectDataWrapper<>(obj)); + curTx.put(newData); + } + + public Optional getMeta(JObjectKey key) { + return getMeta(key, LockingStrategy.OPTIMISTIC); + } + + public Optional getData(Class type, JObjectKey key) { + return getData(type, key, LockingStrategy.OPTIMISTIC, true); + } + + public Optional getDataLocal(Class type, JObjectKey key) { + return getData(type, key, LockingStrategy.OPTIMISTIC, false); + } + + public Optional getData(Class type, JObjectKey key, LockingStrategy strategy) { + return getData(type, key, strategy, true); + } + + public Optional getDataLocal(Class type, JObjectKey key, LockingStrategy strategy) { + return getData(type, key, strategy, 
false); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryJDataRemoteSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryJDataRemoteSerializer.java new file mode 100644 index 00000000..8131c103 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryJDataRemoteSerializer.java @@ -0,0 +1,21 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.persistence.JDataRemoteP; +import com.usatiuk.dhfs.utils.SerializationHelper; +import jakarta.inject.Singleton; + +@Singleton +public class TemporaryJDataRemoteSerializer implements ProtoSerializer { + @Override + public JDataRemote deserialize(JDataRemoteP message) { + return SerializationHelper.deserialize(message.getSerializedData().toByteArray()); + } + + @Override + public JDataRemoteP serialize(JDataRemote object) { + return JDataRemoteP.newBuilder() + .setSerializedData(SerializationHelper.serialize(object)) + .build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryOpSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryOpSerializer.java new file mode 100644 index 00000000..ab9682db --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryOpSerializer.java @@ -0,0 +1,22 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.repository.OpP; +import com.usatiuk.dhfs.objects.repository.invalidation.Op; +import com.usatiuk.dhfs.utils.SerializationHelper; +import jakarta.inject.Singleton; + +@Singleton +public class TemporaryOpSerializer implements ProtoSerializer { + @Override + public Op deserialize(OpP message) { + return SerializationHelper.deserialize(message.getSerializedData().toByteArray()); + } + + @Override + public OpP serialize(Op object) { + return OpP.newBuilder() + .setSerializedData(SerializationHelper.serialize(object)) + .build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index 43502d20..43b9cf7e 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -1,75 +1,74 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; -import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.*; -import com.usatiuk.dhfs.objects.jrepository.*; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.dhfs.objects.repository.opsupport.Op; -import com.usatiuk.dhfs.objects.repository.opsupport.OpObject; -import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; -import com.usatiuk.dhfs.objects.repository.opsupport.OpSender; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; +import 
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java
index 43502d20..43b9cf7e 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java
@@ -1,75 +1,74 @@
 package com.usatiuk.dhfs.objects.jkleppmanntree;
 
-import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.*;
-import com.usatiuk.dhfs.objects.jrepository.*;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.dhfs.objects.repository.opsupport.Op;
-import com.usatiuk.dhfs.objects.repository.opsupport.OpObject;
-import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry;
-import com.usatiuk.dhfs.objects.repository.opsupport.OpSender;
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.TransactionManager;
+import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
+import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
+import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
+import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData;
+import com.usatiuk.dhfs.objects.repository.invalidation.Op;
+import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService;
+import com.usatiuk.dhfs.objects.transaction.LockingStrategy;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
 import com.usatiuk.kleppmanntree.*;
-import com.usatiuk.utils.VoidFn;
 import io.quarkus.logging.Log;
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.inject.Inject;
 import org.apache.commons.lang3.tuple.Pair;
+import org.pcollections.HashTreePMap;
+import org.pcollections.TreePMap;
+import org.pcollections.TreePSet;
 
 import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Function;
 
 @ApplicationScoped
 public class JKleppmannTreeManager {
     private static final String dataFileName = "trees";
-    private final ConcurrentHashMap<String, JKleppmannTree> _trees = new ConcurrentHashMap<>();
     @Inject
-    JKleppmannTreePeerInterface jKleppmannTreePeerInterface;
+    Transaction curTx;
     @Inject
-    OpSender opSender;
-    @Inject
-    OpObjectRegistry opObjectRegistry;
-    @Inject
-    JObjectManager jObjectManager;
-    @Inject
-    PersistentPeerDataService persistentPeerDataService;
-    @Inject
-    JObjectTxManager jObjectTxManager;
-    @Inject
-    SoftJObjectFactory softJObjectFactory;
+    TransactionManager txManager;
     @Inject
     JKleppmannTreePeerInterface peerInterface;
+    @Inject
+    PeerInfoService peerInfoService;
 
-    public JKleppmannTree getTree(String name) {
-        return _trees.computeIfAbsent(name, this::createTree);
-    }
-
-    private JKleppmannTree createTree(String name) {
-        return jObjectTxManager.executeTx(() -> {
-            var data = jObjectManager.get(JKleppmannTreePersistentData.nameFromTreeName(name)).orElse(null);
+    public JKleppmannTree getTree(JObjectKey name) {
+        return txManager.executeTx(() -> {
+            var data = curTx.get(JKleppmannTreePersistentData.class, name, LockingStrategy.WRITE).orElse(null);
             if (data == null) {
-                data = jObjectManager.put(new JKleppmannTreePersistentData(name), Optional.empty());
+                data = new JKleppmannTreePersistentData(
+                        name,
+                        TreePSet.empty(),
+                        true,
+                        1L,
+                        HashTreePMap.empty(),
+                        HashTreePMap.empty(),
+                        TreePMap.empty()
+                );
+                curTx.put(data);
+                var rootNode = new JKleppmannTreeNode(JObjectKey.of(name.name() + "_jt_root"), null, new JKleppmannTreeNodeMetaDirectory(""));
+                curTx.put(rootNode);
+                var trashNode = new JKleppmannTreeNode(JObjectKey.of(name.name() + "_jt_trash"), null, new JKleppmannTreeNodeMetaDirectory(""));
+                curTx.put(trashNode);
             }
-            var tree = new JKleppmannTree(name);
-            opObjectRegistry.registerObject(tree);
-            return tree;
+            return new JKleppmannTree(data);
+//            opObjectRegistry.registerObject(tree);
         });
     }
 
-    public class JKleppmannTree implements OpObject {
-        private final KleppmannTree<Long, UUID, JKleppmannTreeNodeMeta, String, JKleppmannTreeNodeWrapper> _tree;
-
-        private final SoftJObject<JKleppmannTreePersistentData> _persistentData;
-
+    public class JKleppmannTree {
+        private final KleppmannTree<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> _tree;
         private final JKleppmannTreeStorageInterface _storageInterface;
         private final JKleppmannTreeClock _clock;
+        private final JObjectKey _treeName;
+        private JKleppmannTreePersistentData _data;
 
-        private final String _treeName;
-
-        JKleppmannTree(String treeName) {
-            _treeName = treeName;
-
-            _persistentData = softJObjectFactory.create(JKleppmannTreePersistentData.class, JKleppmannTreePersistentData.nameFromTreeName(treeName));
+        JKleppmannTree(JKleppmannTreePersistentData data) {
+            _treeName = data.key();
+            _data = data;
 
             _storageInterface = new JKleppmannTreeStorageInterface();
             _clock = new JKleppmannTreeClock();
@@ -77,111 +76,77 @@ public class JKleppmannTreeManager {
             _tree = new KleppmannTree<>(_storageInterface, peerInterface, _clock, new JOpRecorder());
         }
 
-        public String traverse(List<String> names) {
+        public JObjectKey traverse(List<String> names) {
             return _tree.traverse(names);
         }
 
-        public String getNewNodeId() {
+        public JObjectKey getNewNodeId() {
             return _storageInterface.getNewNodeId();
         }
 
-        public void move(String newParent, JKleppmannTreeNodeMeta newMeta, String node) {
+        public void move(JObjectKey newParent, JKleppmannTreeNodeMeta newMeta, JObjectKey node) {
             _tree.move(newParent, newMeta, node);
         }
 
-        public void trash(JKleppmannTreeNodeMeta newMeta, String node) {
-            _tree.move(_storageInterface.getTrashId(), newMeta.withName(node), node);
+        public void trash(JKleppmannTreeNodeMeta newMeta, JObjectKey nodeKey) {
+            _tree.move(_storageInterface.getTrashId(), newMeta.withName(nodeKey.toString()), nodeKey);
         }
 
-        @Override
-        public boolean hasPendingOpsForHost(UUID host) {
-            return _persistentData.get()
-                    .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY,
-                            (m, d) -> d.getQueues().containsKey(host) &&
-                                    !d.getQueues().get(host).isEmpty()
-                    );
+        public boolean hasPendingOpsForHost(PeerId host) {
+            return !_data.queues().getOrDefault(host, TreePMap.empty()).isEmpty();
         }
 
-        @Override
-        public List<Op> getPendingOpsForHost(UUID host, int limit) {
-            return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
-                if (d.getQueues().containsKey(host)) {
-                    var queue = d.getQueues().get(host);
-                    ArrayList<Op> collected = new ArrayList<>();
-
-                    for (var node : queue.entrySet()) {
-                        collected.add(new JKleppmannTreeOpWrapper(node.getValue()));
-                        if (collected.size() >= limit) break;
-                    }
-
-                    return collected;
-                }
-                return List.of();
-            });
+        public List<Op> getPendingOpsForHost(PeerId host, int limit) {
+            ArrayList<Op> collected = new ArrayList<>();
+            for (var node : _data.queues().getOrDefault(host, TreePMap.empty()).entrySet()) {
+                collected.add(new JKleppmannTreeOpWrapper(_data.key(), node.getValue()));
+                if (collected.size() >= limit) break;
+            }
+            return Collections.unmodifiableList(collected);
         }
 
-        @Override
-        public String getId() {
-            return _treeName;
-        }
-
-        @Override
-        public void commitOpForHost(UUID host, Op op) {
+        //        @Override
+        public void commitOpForHost(PeerId host, Op op) {
            if (!(op instanceof JKleppmannTreeOpWrapper jop))
-                throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId());
-            _persistentData.get().assertRwLock();
-            _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
+                throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass());
 
-            var got = _persistentData.get().getData().getQueues().get(host).firstEntry().getValue();
-            if (!Objects.equals(jop.getOp(), got))
+            var firstOp = _data.queues().get(host).firstEntry().getValue();
+            if (!Objects.equals(firstOp, jop.op()))
                throw new IllegalArgumentException("Committed op push was not the oldest");
 
-            _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
-                @Override
-                public boolean mutate(JKleppmannTreePersistentData object) {
-                    object.getQueues().get(host).pollFirstEntry();
-                    return true;
-                }
-
-                @Override
-                public void revert(JKleppmannTreePersistentData object) {
-                    object.getQueues().get(host).put(jop.getOp().timestamp(), jop.getOp());
-                }
-            });
-
+            _data = _data.withQueues(_data.queues().plus(host, _data.queues().get(host).minus(_data.queues().get(host).firstKey())));
+            curTx.put(_data);
        }
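commitOpForHost() now trims the per-peer queue by rebuilding a persistent map instead of mutating one under a lock. The idiom in isolation (org.pcollections is already a dependency; keys and values here are placeholders):

    TreePMap<Long, String> q = TreePMap.<Long, String>empty().plus(1L, "a").plus(2L, "b");
    TreePMap<Long, String> popped = q.minus(q.firstKey()); // drops the oldest entry
    // q itself still holds both entries: old snapshots stay valid, which is
    // what lets _data be swapped atomically with a single curTx.put()
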
 
-        @Override
-        public void pushBootstrap(UUID host) {
+        public void recordBootstrap(PeerId host) {
             _tree.recordBoostrapFor(host);
         }
 
-        public Pair<String, String> findParent(Function<JKleppmannTreeNodeWrapper, Boolean> predicate) {
+        public Pair<String, JObjectKey> findParent(Function<TreeNode<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey>, Boolean> predicate) {
             return _tree.findParent(predicate);
         }
 
-        @Override
-        public boolean acceptExternalOp(UUID from, Op op) {
+        //        @Override
+        public boolean acceptExternalOp(PeerId from, Op op) {
             if (op instanceof JKleppmannTreePeriodicPushOp pushOp) {
                 return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp());
             }
 
             if (!(op instanceof JKleppmannTreeOpWrapper jop))
-                throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId());
+                throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass());
 
-            JObject<File> fileRef;
-            if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) {
-                var fino = f.getFileIno();
-                fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId()));
-            } else {
-                fileRef = null;
-            }
+//            if (jop.op().newMeta() instanceof JKleppmannTreeNodeMetaFile f) {
+//                var fino = f.getFileIno();
+//                fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId()));
+//            } else {
+//                fileRef = null;
+//            }
 
             if (Log.isTraceEnabled())
-                Log.trace("Received op from " + from + ": " + jop.getOp().timestamp().timestamp() + " " + jop.getOp().childId() + "->" + jop.getOp().newParentId() + " as " + jop.getOp().newMeta().getName());
+                Log.trace("Received op from " + from + ": " + jop.op().timestamp().timestamp() + " " + jop.op().childId() + "->" + jop.op().newParentId() + " as " + jop.op().newMeta().getName());
 
             try {
-                _tree.applyExternalOp(from, jop.getOp());
+                _tree.applyExternalOp(from, jop.op());
             } catch (Exception e) {
                 Log.error("Error applying external op", e);
                 throw e;
@@ -189,376 +154,216 @@ public class JKleppmannTreeManager {
                 // FIXME:
                 // Fixup the ref if it didn't really get applied
 
-                if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile))
-                    Log.error("Could not create child of pushed op: " + jop.getOp());
+//                if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile))
+//                    Log.error("Could not create child of pushed op: " + jop.getOp());
 
-                if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) {
-                    if (fileRef != null) {
-                        var got = jObjectManager.get(jop.getOp().childId()).orElse(null);
-
-                        VoidFn remove = () -> {
-                            fileRef.runWriteLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> {
-                                m.removeRef(jop.getOp().childId());
-                            });
-                        };
-
-                        if (got == null) {
-                            remove.apply();
-                        } else {
-                            try {
-                                got.rLock();
-                                try {
-                                    got.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-                                    if (got.getData() == null || !got.getData().extractRefs().contains(f.getFileIno()))
-                                        remove.apply();
-                                } finally {
-                                    got.rUnlock();
-                                }
-                            } catch (DeletedObjectAccessException dex) {
-                                remove.apply();
-                            }
-                        }
-                    }
-                }
+//                if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) {
+//                    if (fileRef != null) {
+//                        var got = jObjectManager.get(jop.getOp().childId()).orElse(null);
+//
+//                        VoidFn remove = () -> {
+//                            fileRef.runWriteLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> {
+//                                m.removeRef(jop.getOp().childId());
+//                            });
+//                        };
+//
+//                        if (got == null) {
+//                            remove.apply();
+//                        } else {
+//                            try {
+//                                got.rLock();
+//                                try {
+//                                    got.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
+//                                    if (got.getData() == null || !got.getData().extractRefs().contains(f.getFileIno()))
+//                                        remove.apply();
+//                                } finally {
+//                                    got.rUnlock();
+//                                }
+//                            } catch (DeletedObjectAccessException dex) {
+//                                remove.apply();
+//                            }
+//                        }
+//                    }
+//                }
             }
 
             return true;
         }
 
-        @Override
-        public Op getPeriodicPushOp() {
-            return new JKleppmannTreePeriodicPushOp(persistentPeerDataService.getSelfUuid(), _clock.peekTimestamp());
-        }
+//        @Override
+//        public Op getPeriodicPushOp() {
+//            return new JKleppmannTreePeriodicPushOp(persistentPeerDataService.getSelfUuid(), _clock.peekTimestamp());
+//        }
 
-        @Override
-        public void addToTx() {
-            // FIXME: a hack
-            _persistentData.get().rwLockNoCopy();
-            _persistentData.get().rwUnlock();
-        }
+//        @Override
+//        public void addToTx() {
+//            // FIXME: a hack
+//            _persistentData.get().rwLockNoCopy();
+//            _persistentData.get().rwUnlock();
+//        }
 
-        private class JOpRecorder implements OpRecorder<Long, UUID, JKleppmannTreeNodeMeta, String> {
+        private class JOpRecorder implements OpRecorder<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> {
             @Override
-            public void recordOp(OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> op) {
-                _persistentData.get().assertRwLock();
-                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-                var hostUuds = persistentPeerDataService.getHostUuids().stream().toList();
-                _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
-                    @Override
-                    public boolean mutate(JKleppmannTreePersistentData object) {
-                        object.recordOp(hostUuds, op);
-                        return true;
-                    }
-
-                    @Override
-                    public void revert(JKleppmannTreePersistentData object) {
-                        object.removeOp(hostUuds, op);
-                    }
-                });
-                opSender.push(JKleppmannTree.this);
+            public void recordOp(OpMove<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> op) {
+                for (var p : peerInfoService.getPeersNoSelf()) {
+                    recordOpForPeer(p.id(), op);
+                }
             }
 
             @Override
-            public void recordOpForPeer(UUID peer, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> op) {
-                _persistentData.get().assertRwLock();
-                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-                _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
-                    @Override
-                    public boolean mutate(JKleppmannTreePersistentData object) {
-                        object.recordOp(peer, op);
-                        return true;
-                    }
-
-                    @Override
-                    public void revert(JKleppmannTreePersistentData object) {
-                        object.removeOp(peer, op);
-                    }
-                });
-                opSender.push(JKleppmannTree.this);
+            public void recordOpForPeer(PeerId peer, OpMove<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> op) {
+                _data = _data.withQueues(_data.queues().plus(peer, _data.queues().getOrDefault(peer, TreePMap.empty()).plus(op.timestamp(), op)));
+                curTx.put(_data);
             }
         }
 
         private class JKleppmannTreeClock implements Clock<Long> {
             @Override
             public Long getTimestamp() {
-                _persistentData.get().assertRwLock();
-                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-                var ret = _persistentData.get().getData().getClock().peekTimestamp() + 1;
-                _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
-                    @Override
-                    public boolean mutate(JKleppmannTreePersistentData object) {
-                        object.getClock().getTimestamp();
-                        return true;
-                    }
-
-                    @Override
-                    public void revert(JKleppmannTreePersistentData object) {
-                        object.getClock().ungetTimestamp();
-                    }
-                });
-                return ret;
+                var res = _data.clock() + 1;
+                _data = _data.withClock(res);
+                curTx.put(_data);
+                return res;
             }
 
             @Override
             public Long peekTimestamp() {
-                return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getClock().peekTimestamp());
+                return _data.clock();
             }
 
             @Override
             public Long updateTimestamp(Long receivedTimestamp) {
-                _persistentData.get().assertRwLock();
-                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-                _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
-                    Long _old;
-
-                    @Override
-                    public boolean mutate(JKleppmannTreePersistentData object) {
-                        _old = object.getClock().updateTimestamp(receivedTimestamp);
-                        return true;
-                    }
-
-                    @Override
-                    public void revert(JKleppmannTreePersistentData object) {
-                        object.getClock().setTimestamp(_old);
-                    }
-                });
-                return _persistentData.get().getData().getClock().peekTimestamp();
+                var old = _data.clock();
+                _data = _data.withClock(Math.max(old, receivedTimestamp) + 1);
+                curTx.put(_data);
+                return old;
             }
         }
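With the mutate/revert machinery gone, what remains in the clock is the plain Lamport rule, worked through on concrete numbers (note that updateTimestamp() now returns the pre-update value):

    long clock = 5;                       // _data.clock()
    long received = 9;                    // timestamp seen on a remote op
    long old = clock;
    clock = Math.max(old, received) + 1;  // stored clock becomes 10
    // every later local tick is now ordered after both 5 and 9
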
 
-        public class JKleppmannTreeStorageInterface implements StorageInterface<Long, UUID, JKleppmannTreeNodeMeta, String, JKleppmannTreeNodeWrapper> {
+        public class JKleppmannTreeStorageInterface implements StorageInterface<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> {
             private final LogWrapper _logWrapper = new LogWrapper();
             private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper();
 
             public JKleppmannTreeStorageInterface() {
-                if (jObjectManager.get(getRootId()).isEmpty()) {
-                    putNode(new JKleppmannTreeNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory(""))));
-                    putNode(new JKleppmannTreeNode(new TreeNode<>(getTrashId(), null, null)));
-                }
-            }
-
-            public JObject<JKleppmannTreeNode> putNode(JKleppmannTreeNode node) {
-                return jObjectManager.put(node, Optional.ofNullable(node.getNode().getParent()));
-            }
-
-            public JObject<JKleppmannTreeNode> putNodeLocked(JKleppmannTreeNode node) {
-                return jObjectManager.putLocked(node, Optional.ofNullable(node.getNode().getParent()));
             }
 
             @Override
-            public String getRootId() {
-                return _treeName + "_jt_root";
+            public JObjectKey getRootId() {
+                return new JObjectKey(_treeName.name() + "_jt_root");
             }
 
             @Override
-            public String getTrashId() {
-                return _treeName + "_jt_trash";
+            public JObjectKey getTrashId() {
+                return new JObjectKey(_treeName.name() + "_jt_trash");
             }
 
             @Override
-            public String getNewNodeId() {
-                return persistentPeerDataService.getUniqueId();
+            public JObjectKey getNewNodeId() {
+                return new JObjectKey(UUID.randomUUID().toString());
             }
 
             @Override
-            public JKleppmannTreeNodeWrapper getById(String id) {
-                var got = jObjectManager.get(id);
-                if (got.isEmpty()) return null;
-                return new JKleppmannTreeNodeWrapper((JObject<JKleppmannTreeNode>) got.get());
+            public JKleppmannTreeNode getById(JObjectKey id) {
+                var got = curTx.get(JKleppmannTreeNode.class, id);
+                return got.orElse(null);
             }
 
             @Override
-            public JKleppmannTreeNodeWrapper createNewNode(TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> node) {
-                return new JKleppmannTreeNodeWrapper(putNodeLocked(new JKleppmannTreeNode(node)));
+            public JKleppmannTreeNode createNewNode(JObjectKey key, JObjectKey parent, JKleppmannTreeNodeMeta meta) {
+                return new JKleppmannTreeNode(key, parent, meta);
             }
 
             @Override
-            public void removeNode(String id) {}
+            public void putNode(TreeNode<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> node) {
+                curTx.put(((JKleppmannTreeNode) node));
+            }
 
             @Override
-            public LogInterface<Long, UUID, JKleppmannTreeNodeMeta, String> getLog() {
+            public void removeNode(JObjectKey id) {
+                // TODO
+            }
+
+            @Override
+            public LogInterface<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> getLog() {
                 return _logWrapper;
             }
 
             @Override
-            public PeerTimestampLogInterface<Long, UUID> getPeerTimestampLog() {
+            public PeerTimestampLogInterface<Long, PeerId> getPeerTimestampLog() {
                 return _peerLogWrapper;
             }
 
-            @Override
-            public void rLock() {
-                _persistentData.get().rLock();
-            }
-
-            @Override
-            public void rUnlock() {
-                _persistentData.get().rUnlock();
-            }
-
-            @Override
-            public void rwLock() {
-                _persistentData.get().rwLockNoCopy();
-            }
-
-            @Override
-            public void rwUnlock() {
-                _persistentData.get().rwUnlock();
-            }
-
-            @Override
-            public void assertRwLock() {
-                _persistentData.get().assertRwLock();
-            }
-
-            private class PeerLogWrapper implements PeerTimestampLogInterface<Long, UUID> {
-
+            private class PeerLogWrapper implements PeerTimestampLogInterface<Long, PeerId> {
                 @Override
-                public Long getForPeer(UUID peerId) {
-                    return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY,
-                            (m, d) -> d.getPeerTimestampLog().get(peerId));
+                public Long getForPeer(PeerId peerId) {
+                    return _data.peerTimestampLog().get(peerId);
                 }
 
                 @Override
-                public void putForPeer(UUID peerId, Long timestamp) {
-                    _persistentData.get().assertRwLock();
-                    _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-                    _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
-                        Long old;
-
-                        @Override
-                        public boolean mutate(JKleppmannTreePersistentData object) {
-                            old = object.getPeerTimestampLog().put(peerId, timestamp);
-                            return !Objects.equals(old, timestamp);
-                        }
-
-                        @Override
-                        public void revert(JKleppmannTreePersistentData object) {
-                            if (old != null)
-                                object.getPeerTimestampLog().put(peerId, old);
-                            else
-                                object.getPeerTimestampLog().remove(peerId, timestamp);
-                        }
-                    });
+                public void putForPeer(PeerId peerId, Long timestamp) {
+                    _data = _data.withPeerTimestampLog(_data.peerTimestampLog().plus(peerId, timestamp));
+                    curTx.put(_data);
                 }
             }
 
-            private class LogWrapper implements LogInterface<Long, UUID, JKleppmannTreeNodeMeta, String> {
+            private class LogWrapper implements LogInterface<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> {
                 @Override
-                public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> peekOldest() {
-                    return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
-                        var ret = d.getLog().firstEntry();
-                        if (ret == null) return null;
-                        return Pair.of(ret);
-                    });
+                public Pair<CombinedTimestamp<Long, PeerId>, LogRecord<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey>> peekOldest() {
+                    if (_data.log().isEmpty()) return null;
+                    return Pair.of(_data.log().firstEntry());
                 }
 
                 @Override
-                public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> takeOldest() {
-                    _persistentData.get().assertRwLock();
-                    _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-
-                    var ret = _persistentData.get().getData().getLog().firstEntry();
-                    if (ret != null)
-                        _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
-                            @Override
-                            public boolean mutate(JKleppmannTreePersistentData object) {
-                                object.getLog().pollFirstEntry();
-                                return true;
-                            }
-
-                            @Override
-                            public void revert(JKleppmannTreePersistentData object) {
-                                object.getLog().put(ret.getKey(), ret.getValue());
-                            }
-                        });
+                public Pair<CombinedTimestamp<Long, PeerId>, LogRecord<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey>> takeOldest() {
+                    if (_data.log().isEmpty()) return null;
+                    var ret = _data.log().firstEntry();
+                    _data = _data.withLog(_data.log().minusFirstEntry());
+                    curTx.put(_data);
                     return Pair.of(ret);
                 }
 
                 @Override
-                public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> peekNewest() {
-                    return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
-                        var ret = d.getLog().lastEntry();
-                        if (ret == null) return null;
-                        return Pair.of(ret);
-                    });
+                public Pair<CombinedTimestamp<Long, PeerId>, LogRecord<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey>> peekNewest() {
+                    if (_data.log().isEmpty()) return null;
+                    return Pair.of(_data.log().lastEntry());
                 }
 
                 @Override
-                public List<Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>>> newestSlice(CombinedTimestamp<Long, UUID> since, boolean inclusive) {
-                    return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
-                        var tail = d.getLog().tailMap(since, inclusive);
-                        return tail.entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
-                    });
+                public List<Pair<CombinedTimestamp<Long, PeerId>, LogRecord<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey>>> newestSlice(CombinedTimestamp<Long, PeerId> since, boolean inclusive) {
+                    return _data.log().tailMap(since, inclusive).entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
                 }
 
                 @Override
-                public List<Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>>> getAll() {
-                    return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
-                        return d.getLog().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
-                    });
+                public List<Pair<CombinedTimestamp<Long, PeerId>, LogRecord<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey>>> getAll() {
+                    return _data.log().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
                 }
 
                 @Override
                 public boolean isEmpty() {
-                    return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
-                        return d.getLog().isEmpty();
-                    });
+                    return _data.log().isEmpty();
                 }
 
                 @Override
-                public boolean containsKey(CombinedTimestamp<Long, UUID> timestamp) {
-                    return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
-                        return d.getLog().containsKey(timestamp);
-                    });
+                public boolean containsKey(CombinedTimestamp<Long, PeerId> timestamp) {
+                    return _data.log().containsKey(timestamp);
                 }
 
                 @Override
                 public long size() {
-                    return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
-                        return (long) d.getLog().size();
-                    });
+                    return _data.log().size();
                 }
 
                 @Override
-                public void put(CombinedTimestamp<Long, UUID> timestamp, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String> record) {
-                    _persistentData.get().assertRwLock();
-                    _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-                    if (_persistentData.get().getData().getLog().containsKey(timestamp))
+                public void put(CombinedTimestamp<Long, PeerId> timestamp, LogRecord<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> record) {
+                    if (_data.log().containsKey(timestamp))
                         throw new IllegalStateException("Overwriting log entry?");
-                    _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
-                        @Override
-                        public boolean mutate(JKleppmannTreePersistentData object) {
-                            object.getLog().put(timestamp, record);
-                            return true;
-                        }
-
-                        @Override
-                        public void revert(JKleppmannTreePersistentData object) {
-                            object.getLog().remove(timestamp, record);
-                        }
-                    });
+                    _data = _data.withLog(_data.log().plus(timestamp, record));
+                    curTx.put(_data);
                 }
 
                 @Override
-                public void replace(CombinedTimestamp<Long, UUID> timestamp, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String> record) {
-                    _persistentData.get().assertRwLock();
-                    _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-                    _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
-                        LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String> old;
-
-                        @Override
-                        public boolean mutate(JKleppmannTreePersistentData object) {
-                            old = object.getLog().put(timestamp, record);
-                            return !Objects.equals(old, record);
-                        }
-
-                        @Override
-                        public void revert(JKleppmannTreePersistentData object) {
-                            if (old != null)
-                                object.getLog().put(timestamp, old);
-                            else
-                                object.getLog().remove(timestamp, record);
-                        }
-                    });
+                public void replace(CombinedTimestamp<Long, PeerId> timestamp, LogRecord<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> record) {
+                    _data = _data.withLog(_data.log().plus(timestamp, record));
+                    curTx.put(_data);
                 }
             }
         }
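The log is drained with the same value semantics; for reference, the consumer loop now has this shape. A sketch only: process() is a hypothetical consumer, and each takeOldest() call persists a shrunken log snapshot through curTx.put():

    Pair<CombinedTimestamp<Long, PeerId>, LogRecord<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey>> oldest;
    while ((oldest = log.takeOldest()) != null) {
        process(oldest.getValue());
    }
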
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java
index 4612f8fc..1bcf5798 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java
@@ -1,28 +1,22 @@
 package com.usatiuk.dhfs.objects.jkleppmanntree;
 
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
 import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
 import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
-import com.usatiuk.dhfs.objects.repository.opsupport.Op;
+import com.usatiuk.dhfs.objects.repository.invalidation.Op;
 import com.usatiuk.kleppmanntree.OpMove;
-import lombok.Getter;
 
+import java.io.Serializable;
 import java.util.Collection;
 import java.util.List;
-import java.util.UUID;
 
 // Wrapper to avoid having to specify generic types
-public class JKleppmannTreeOpWrapper implements Op {
-    @Getter
-    private final OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> _op;
-
-    public JKleppmannTreeOpWrapper(OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> op) {
-        if (op == null) throw new IllegalArgumentException("op shouldn't be null");
-        _op = op;
-    }
-
+public record JKleppmannTreeOpWrapper(JObjectKey treeName,
+                                      OpMove<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> op) implements Op, Serializable {
     @Override
-    public Collection<String> getEscapedRefs() {
-        if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) {
+    public Collection<JObjectKey> getEscapedRefs() {
+        if (op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) {
             return List.of(mf.getFileIno());
         }
         return List.of();
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java
index 39b5d484..0ea613f7 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java
@@ -1,25 +1,31 @@
 package com.usatiuk.dhfs.objects.jkleppmanntree;
 
+import com.usatiuk.dhfs.objects.PeerId;
 import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
+import com.usatiuk.dhfs.objects.repository.peersync.PeerInfo;
+import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService;
 import com.usatiuk.kleppmanntree.PeerInterface;
 import jakarta.inject.Inject;
 import jakarta.inject.Singleton;
 
 import java.util.Collection;
+import java.util.List;
 import java.util.UUID;
 
 @Singleton
-public class JKleppmannTreePeerInterface implements PeerInterface<UUID> {
+public class JKleppmannTreePeerInterface implements PeerInterface<PeerId> {
+    @Inject
+    PeerInfoService peerInfoService;
     @Inject
     PersistentPeerDataService persistentPeerDataService;
 
     @Override
-    public UUID getSelfId() {
+    public PeerId getSelfId() {
         return persistentPeerDataService.getSelfUuid();
     }
 
     @Override
-    public Collection<UUID> getAllPeers() {
-        return persistentPeerDataService.getHostUuidsAndSelf();
+    public Collection<PeerId> getAllPeers() {
+        return peerInfoService.getPeers().stream().map(PeerInfo::id).toList();
     }
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java
index 3c84d067..679e1249 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java
@@ -1,25 +1,28 @@
 package com.usatiuk.dhfs.objects.jkleppmanntree;
 
-import com.usatiuk.dhfs.objects.repository.opsupport.Op;
-import lombok.Getter;
+import com.usatiuk.dhfs.objects.PeerId;
 
-import java.util.Collection;
-import java.util.List;
 import java.util.UUID;
 
-public class JKleppmannTreePeriodicPushOp implements Op {
-    @Getter
-    private final UUID _from;
-    @Getter
+public class JKleppmannTreePeriodicPushOp {
+    private final PeerId _from;
     private final long _timestamp;
 
-    public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) {
+    public JKleppmannTreePeriodicPushOp(PeerId from, long timestamp) {
         _from = from;
         _timestamp = timestamp;
     }
 
-    @Override
-    public Collection<String> getEscapedRefs() {
-        return List.of();
+    public PeerId getFrom() {
+        return _from;
     }
+
+    public long getTimestamp() {
+        return _timestamp;
+    }
+
+//    @Override
+//    public Collection<String> getEscapedRefs() {
+//        return List.of();
+//    }
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
index 0146da88..8b2afa28 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
@@ -1,45 +1,72 @@
 package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
 
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.OnlyLocal;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
+import com.usatiuk.dhfs.objects.JDataRefcounted;
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.repository.peersync.structs.JKleppmannTreeNodeMetaPeer;
+import com.usatiuk.kleppmanntree.OpMove;
 import com.usatiuk.kleppmanntree.TreeNode;
-import lombok.Getter;
+import org.pcollections.HashTreePMap;
+import org.pcollections.PCollection;
+import org.pcollections.PMap;
+import org.pcollections.TreePSet;
 
+import java.io.Serializable;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 // FIXME: Ideally this is two classes?
-@OnlyLocal
-public class JKleppmannTreeNode extends JObjectData {
-    @Getter
-    final TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> _node;
+public record JKleppmannTreeNode(JObjectKey key, PCollection<JObjectKey> refsFrom, boolean frozen, JObjectKey parent,
+                                 OpMove<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> lastEffectiveOp,
+                                 JKleppmannTreeNodeMeta meta,
+                                 PMap<String, JObjectKey> children) implements TreeNode<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey>, JDataRefcounted, Serializable {
 
-    public JKleppmannTreeNode(TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> node) {
-        _node = node;
+    public JKleppmannTreeNode(JObjectKey id, JObjectKey parent, JKleppmannTreeNodeMeta meta) {
+        this(id, TreePSet.empty(), false, parent, null, meta, HashTreePMap.empty());
     }
 
     @Override
-    public String getName() {
-        return _node.getId();
+    public JKleppmannTreeNode withParent(JObjectKey parent) {
+        return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children);
     }
 
     @Override
-    public Class<? extends ConflictResolver> getConflictResolver() {
-        return null;
+    public JKleppmannTreeNode withLastEffectiveOp(OpMove<Long, PeerId, JKleppmannTreeNodeMeta, JObjectKey> lastEffectiveOp) {
+        return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children);
     }
 
     @Override
-    public Collection<String> extractRefs() {
-        if (_node.getMeta() instanceof JKleppmannTreeNodeMetaFile)
-            return List.of(((JKleppmannTreeNodeMetaFile) _node.getMeta()).getFileIno());
-        return Collections.unmodifiableCollection(_node.getChildren().values());
+    public JKleppmannTreeNode withMeta(JKleppmannTreeNodeMeta meta) {
+        return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children);
     }
 
     @Override
-    public Class<? extends JObjectData> getRefType() {
-        return JObjectData.class;
+    public JKleppmannTreeNode withChildren(PMap<String, JObjectKey> children) {
+        return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children);
+    }
+
+    @Override
+    public JKleppmannTreeNode withRefsFrom(PCollection<JObjectKey> refs) {
+        return new JKleppmannTreeNode(key, refs, frozen, parent, lastEffectiveOp, meta, children);
+    }
+
+    @Override
+    public JKleppmannTreeNode withFrozen(boolean frozen) {
+        return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children);
+    }
+
+    @Override
+    public Collection<JObjectKey> collectRefsTo() {
+        return Stream.concat(children().values().stream(),
+                switch (meta()) {
+                    case JKleppmannTreeNodeMetaDirectory dir -> Stream.of();
+                    case JKleppmannTreeNodeMetaFile file -> Stream.of(file.getFileIno());
+                    case JKleppmannTreeNodeMetaPeer peer -> Stream.of(peer.getPeerId());
+                    default -> throw new IllegalStateException("Unexpected value: " + meta());
+                }
+        ).collect(Collectors.toUnmodifiableSet());
     }
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java
index 2ea7d27f..d2c1f988 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java
@@ -1,18 +1,20 @@
 package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
 
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
 import com.usatiuk.kleppmanntree.NodeMeta;
-import lombok.Getter;
 
 import java.util.Objects;
 
-@ProtoMirror(JKleppmannTreeNodeMetaP.class)
+//@ProtoMirror(JKleppmannTreeNodeMetaP.class)
 public abstract class JKleppmannTreeNodeMeta implements NodeMeta {
-    @Getter
     private final String _name;
 
-    public JKleppmannTreeNodeMeta(String name) {_name = name;}
+    public JKleppmannTreeNodeMeta(String name) {
+        _name = name;
+    }
+
+    public String getName() {
+        return _name;
+    }
 
     public abstract JKleppmannTreeNodeMeta withName(String name);
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java
index 79882017..4cf3514d 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java
@@ -1,9 +1,5 @@
 package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
 
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaDirectoryP;
-
-@ProtoMirror(JKleppmannTreeNodeMetaDirectoryP.class)
 public class JKleppmannTreeNodeMetaDirectory extends JKleppmannTreeNodeMeta {
     public JKleppmannTreeNodeMetaDirectory(String name) {
         super(name);
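With the node as a record, outgoing references are derived from the meta type by an exhaustive switch. A quick concrete check of the directory case, using only the constructor and meta classes defined above:

    var dir = new JKleppmannTreeNode(JObjectKey.of("demo_jt_root"), null,
            new JKleppmannTreeNodeMetaDirectory(""));
    // no children recorded yet, and directory meta pins nothing extra:
    assert dir.collectRefsTo().isEmpty();
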
JKleppmannTreeNodeMeta { - @Getter - private final String _fileIno; + private final JObjectKey _fileIno; - public JKleppmannTreeNodeMetaFile(String name, String fileIno) { + public JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) { super(name); _fileIno = fileIno; } + public JObjectKey getFileIno() { + return _fileIno; + } + @Override public JKleppmannTreeNodeMeta withName(String name) { return new JKleppmannTreeNodeMetaFile(name, _fileIno); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java index d6881d5b..f6576804 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -1,88 +1,53 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.OnlyLocal; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import com.usatiuk.kleppmanntree.AtomicClock; +import com.usatiuk.dhfs.objects.JDataRefcounted; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.kleppmanntree.CombinedTimestamp; import com.usatiuk.kleppmanntree.LogRecord; import com.usatiuk.kleppmanntree.OpMove; -import lombok.Getter; +import org.pcollections.PCollection; +import org.pcollections.PMap; +import org.pcollections.PSortedMap; +import org.pcollections.TreePMap; import java.util.*; -@OnlyLocal -public class JKleppmannTreePersistentData extends JObjectData { - private final String _treeName; - @Getter - private final AtomicClock _clock; - @Getter - private final HashMap, OpMove>> _queues; - @Getter - private final HashMap _peerTimestampLog; - @Getter - private final TreeMap, LogRecord> _log; - - public JKleppmannTreePersistentData(String treeName, AtomicClock clock, - HashMap, OpMove>> queues, - HashMap peerTimestampLog, TreeMap, LogRecord> log) { - _treeName = treeName; - _clock = clock; - _queues = queues; - _peerTimestampLog = peerTimestampLog; - _log = log; - } - - public JKleppmannTreePersistentData(String treeName) { - _treeName = treeName; - _clock = new AtomicClock(1); - _queues = new HashMap<>(); - _peerTimestampLog = new HashMap<>(); - _log = new TreeMap<>(); - } - - public static String nameFromTreeName(String treeName) { - return treeName + "_pd"; - } - - public void recordOp(UUID host, OpMove opMove) { - _queues.computeIfAbsent(host, h -> new TreeMap<>()); - _queues.get(host).put(opMove.timestamp(), opMove); - } - - public void removeOp(UUID host, OpMove opMove) { - _queues.get(host).remove(opMove.timestamp(), opMove); - } - - public void recordOp(Collection hosts, OpMove opMove) { - for (var u : hosts) { - recordOp(u, opMove); - } - } - - public void removeOp(Collection hosts, OpMove opMove) { - for (var u : hosts) { - removeOp(u, opMove); - } - } - - +public record JKleppmannTreePersistentData( + JObjectKey key, PCollection refsFrom, boolean frozen, + long clock, + PMap, OpMove>> queues, + PMap peerTimestampLog, + PSortedMap, LogRecord> log +) implements JDataRefcounted { @Override - public String getName() { - return nameFromTreeName(_treeName); - } - - public String getTreeName() { - return _treeName; + public JKleppmannTreePersistentData 
withRefsFrom(PCollection refs) { + return new JKleppmannTreePersistentData(key, refs, frozen, clock, queues, peerTimestampLog, log); } @Override - public Class getConflictResolver() { - return null; + public JKleppmannTreePersistentData withFrozen(boolean frozen) { + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); + } + + public JKleppmannTreePersistentData withClock(long clock) { + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); + } + + public JKleppmannTreePersistentData withQueues(PMap, OpMove>> queues) { + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); + } + + public JKleppmannTreePersistentData withPeerTimestampLog(PMap peerTimestampLog) { + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); + } + + public JKleppmannTreePersistentData withLog(PSortedMap, LogRecord> log) { + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); } @Override - public Collection extractRefs() { - return List.of(); + public Collection collectRefsTo() { + return List.of(new JObjectKey(key().name() + "_jt_trash"), new JObjectKey(key().name() + "_jt_root")); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapEntry.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapEntry.java new file mode 100644 index 00000000..997c21bd --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapEntry.java @@ -0,0 +1,12 @@ +package com.usatiuk.dhfs.objects.jmap; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +public record JMapEntry>(JObjectKey holder, K selfKey, + JObjectKey ref) implements JData { + @Override + public JObjectKey key() { + return JMapHelper.makeKey(holder, selfKey); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHelper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHelper.java new file mode 100644 index 00000000..a777cb91 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHelper.java @@ -0,0 +1,49 @@ +package com.usatiuk.dhfs.objects.jmap; + +import com.usatiuk.dhfs.objects.CloseableKvIterator; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import java.util.Optional; + +@ApplicationScoped +public class JMapHelper { + @Inject + Transaction curTx; + + static > JObjectKey makePrefix(JObjectKey holder) { + return JObjectKey.of(holder.name() + "/"); + } + + static > JObjectKey makeKey(JObjectKey holder, K key) { + return JObjectKey.of(makePrefix(holder).name() + key.toString()); + } + + public > CloseableKvIterator> getIterator(JMapHolder holder, IteratorStart start, K key) { + return new JMapIterator<>(curTx.getIterator(start, makeKey(holder.key(), key)), holder); + } + + public > CloseableKvIterator> getIterator(JMapHolder holder, K key) { + return getIterator(holder, IteratorStart.GE, key); + } + + public > CloseableKvIterator> getIterator(JMapHolder holder, IteratorStart start) { + return new JMapIterator<>(curTx.getIterator(start, makePrefix(holder.key())), holder); + } + + public > void put(JMapHolder holder, K key, JObjectKey 
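The key layout the helper assumes is "<holder>/<stringified key>", so a plain lexicographic scan of the store visits one holder's entries contiguously. A sketch, valid from within the same package since makeKey() is package-private (the "file1" holder name is made up):

    JObjectKey k = JMapHelper.makeKey(JObjectKey.of("file1"), JMapLongKey.of(42));
    // k.name() == "file1/0000000000000042"
    // (JMapLongKey.toString() zero-pads to 16 digits, see below)
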
+
+    public <K extends JMapKey & Comparable<K>> void put(JMapHolder<K> holder, K key, JObjectKey ref) {
+        curTx.put(new JMapEntry<>(holder.key(), key, ref));
+    }
+
+    public <K extends JMapKey & Comparable<K>> Optional<JMapEntry<K>> get(JMapHolder<K> holder, K key) {
+        // TODO:
+        return curTx.get(JMapEntry.class, makeKey(holder.key(), key)).map(e -> (JMapEntry<K>) e);
+    }
+
+    public <K extends JMapKey & Comparable<K>> void delete(JMapHolder<K> holder, K key) {
+        curTx.delete(makeKey(holder.key(), key));
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHolder.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHolder.java
new file mode 100644
index 00000000..b8dcbed3
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHolder.java
@@ -0,0 +1,6 @@
+package com.usatiuk.dhfs.objects.jmap;
+
+import com.usatiuk.dhfs.objects.JData;
+
+public interface JMapHolder<K extends JMapKey & Comparable<K>> extends JData {
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java
new file mode 100644
index 00000000..f13f1af7
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java
@@ -0,0 +1,96 @@
+package com.usatiuk.dhfs.objects.jmap;
+
+import com.usatiuk.dhfs.objects.CloseableKvIterator;
+import com.usatiuk.dhfs.objects.JData;
+import com.usatiuk.dhfs.objects.JObjectKey;
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.commons.lang3.tuple.Pair;
+
+public class JMapIterator<K extends JMapKey & Comparable<K>> implements CloseableKvIterator<K, JMapEntry<K>> {
+    private final CloseableKvIterator<JObjectKey, JData> _backing;
+    private final JObjectKey _prefix;
+    private boolean _hasNext = true;
+
+    public JMapIterator(CloseableKvIterator<JObjectKey, JData> backing, JMapHolder<K> holder) {
+        _backing = backing;
+        _prefix = JMapHelper.makePrefix(holder.key());
+        advance();
+    }
+
+    void advance() {
+        assert _hasNext;
+        if (!_backing.hasNext()) {
+            _hasNext = false;
+            return;
+        }
+        if (!_backing.peekNextKey().name().startsWith(_prefix.name())) {
+            _backing.skip();
+            if (!_backing.peekNextKey().name().startsWith(_prefix.name())) {
+                _hasNext = false;
+            }
+        }
+    }
+
+    public K keyToKey(JObjectKey key) {
+        var keyPart = key.name().substring(_prefix.name().length());
+        return (K) JMapLongKey.of(Long.parseLong(keyPart));
+    }
+
+    @Override
+    public K peekNextKey() {
+        if (!_hasNext) {
+            throw new IllegalStateException("No next element");
+        }
+
+        return keyToKey(_backing.peekNextKey());
+    }
+
+    @Override
+    public void skip() {
+        if (!_hasNext) {
+            throw new IllegalStateException("No next element");
+        }
+        advance();
+    }
+
+    @Override
+    public K peekPrevKey() {
+        throw new NotImplementedException();
+    }
+
+    @Override
+    public Pair<K, JMapEntry<K>> prev() {
+        throw new NotImplementedException();
+    }
+
+    @Override
+    public boolean hasPrev() {
+        throw new NotImplementedException();
+    }
+
+    @Override
+    public void skipPrev() {
+        throw new NotImplementedException();
+    }
+
+    @Override
+    public void close() {
+        _backing.close();
+    }
+
+    @Override
+    public boolean hasNext() {
+        return _hasNext;
+    }
+
+    @Override
+    public Pair<K, JMapEntry<K>> next() {
+        if (!_hasNext) {
+            throw new IllegalStateException("No next element");
+        }
+        var next = _backing.next();
+        assert next.getKey().name().startsWith(_prefix.name());
+        advance();
+        return Pair.of(keyToKey(next.getKey()), (JMapEntry<K>) next.getValue());
+    }
+}
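A hypothetical scan of one holder's entries, smallest key first. This assumes CloseableKvIterator is AutoCloseable, as its name and close() method suggest; helper and holder are instances of the classes above:

    try (var it = helper.getIterator(holder, JMapLongKey.of(0))) {
        while (it.hasNext()) {
            var entry = it.next(); // Pair<JMapLongKey, JMapEntry<JMapLongKey>>
            System.out.println(entry.getKey() + " -> " + entry.getValue().ref());
        }
    }
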
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapKey.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapKey.java
new file mode 100644
index 00000000..46e61a2c
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapKey.java
@@ -0,0 +1,4 @@
+package com.usatiuk.dhfs.objects.jmap;
+
+public interface JMapKey {
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapLongKey.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapLongKey.java
new file mode 100644
index 00000000..83461084
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapLongKey.java
@@ -0,0 +1,24 @@
+package com.usatiuk.dhfs.objects.jmap;
+
+import javax.annotation.Nonnull;
+import java.io.Serializable;
+
+public record JMapLongKey(long key) implements JMapKey, Comparable<JMapLongKey>, Serializable {
+    public static JMapLongKey of(long key) {
+        return new JMapLongKey(key);
+    }
+
+    @Override
+    public String toString() {
+        return String.format("%016d", key);
+    }
+
+    public static JMapLongKey max() {
+        return new JMapLongKey(Long.MAX_VALUE);
+    }
+
+    @Override
+    public int compareTo(@Nonnull JMapLongKey o) {
+        return Long.compare(key, o.key);
+    }
+}
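The zero-padded toString() is what keeps numeric order and lexicographic store order aligned, which the prefix scan above depends on:

    // "0000000000000002" sorts before "0000000000000010", matching Long.compare:
    assert JMapLongKey.of(2).toString().compareTo(JMapLongKey.of(10).toString()) < 0;
    assert JMapLongKey.of(2).compareTo(JMapLongKey.of(10)) < 0;
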
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java
index fcb5a07e..aeea40b5 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java
@@ -24,40 +24,52 @@ import java.util.Date;
 
 public class CertificateTools {
 
-    public static X509Certificate certFromBytes(byte[] bytes) throws CertificateException {
-        CertificateFactory certFactory = CertificateFactory.getInstance("X.509");
-        InputStream in = new ByteArrayInputStream(bytes);
-        return (X509Certificate) certFactory.generateCertificate(in);
+    public static X509Certificate certFromBytes(byte[] bytes) {
+        try {
+            CertificateFactory certFactory = CertificateFactory.getInstance("X.509");
+            InputStream in = new ByteArrayInputStream(bytes);
+            return (X509Certificate) certFactory.generateCertificate(in);
+        } catch (CertificateException e) {
+            throw new RuntimeException(e);
+        }
     }
 
-    public static KeyPair generateKeyPair() throws NoSuchAlgorithmException {
-        KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
-        keyGen.initialize(2048); //FIXME:
-        return keyGen.generateKeyPair();
+    public static KeyPair generateKeyPair() {
+        try {
+            KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
+            keyGen.initialize(2048); //FIXME:
+            return keyGen.generateKeyPair();
+        } catch (NoSuchAlgorithmException e) {
+            throw new RuntimeException(e);
+        }
     }
 
-    public static X509Certificate generateCertificate(KeyPair keyPair, String subject) throws CertificateException, CertIOException, NoSuchAlgorithmException, OperatorCreationException {
-        Provider bcProvider = new BouncyCastleProvider();
-        Security.addProvider(bcProvider);
+    public static X509Certificate generateCertificate(KeyPair keyPair, String subject) {
+        try {
+            Provider bcProvider = new BouncyCastleProvider();
+            Security.addProvider(bcProvider);
 
-        Date startDate = new Date();
+            Date startDate = new Date();
 
-        X500Name cnName = new X500Name("CN=" + subject);
-        BigInteger certSerialNumber = new BigInteger(DigestUtils.sha256(subject));
+            X500Name cnName = new X500Name("CN=" + subject);
+            BigInteger certSerialNumber = new BigInteger(DigestUtils.sha256(subject));
 
-        Calendar calendar = Calendar.getInstance();
-        calendar.setTime(startDate);
-        calendar.add(Calendar.YEAR, 999);
+            Calendar calendar = Calendar.getInstance();
+            calendar.setTime(startDate);
+            calendar.add(Calendar.YEAR, 999);
 
-        Date endDate = calendar.getTime();
+            Date endDate = calendar.getTime();
 
-        ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA").build(keyPair.getPrivate());
+            ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA").build(keyPair.getPrivate());
 
-        JcaX509v3CertificateBuilder certBuilder = new JcaX509v3CertificateBuilder(cnName, certSerialNumber, startDate, endDate, cnName, keyPair.getPublic());
+            JcaX509v3CertificateBuilder certBuilder = new JcaX509v3CertificateBuilder(cnName, certSerialNumber, startDate, endDate, cnName, keyPair.getPublic());
 
-        BasicConstraints basicConstraints = new BasicConstraints(false);
-        certBuilder.addExtension(new ASN1ObjectIdentifier("2.5.29.19"), true, basicConstraints);
+            BasicConstraints basicConstraints = new BasicConstraints(false);
+            certBuilder.addExtension(new ASN1ObjectIdentifier("2.5.29.19"), true, basicConstraints);
 
-        return new JcaX509CertificateConverter().setProvider(bcProvider).getCertificate(certBuilder.build(contentSigner));
+            return new JcaX509CertificateConverter().setProvider(bcProvider).getCertificate(certBuilder.build(contentSigner));
+        } catch (OperatorCreationException | CertificateException | CertIOException e) {
+            throw new RuntimeException(e);
+        }
     }
 }
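What the unchecked wrappers buy at call sites: generating a self-signed identity no longer needs checked-exception plumbing. Sketch using only the API shown above (the subject string is made up):

    KeyPair kp = CertificateTools.generateKeyPair();
    X509Certificate cert = CertificateTools.generateCertificate(kp, "my-peer-id");
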
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java
index 29a53d88..824d07b5 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java
@@ -1,10 +1,16 @@
 package com.usatiuk.dhfs.objects.repository;
 
-import com.usatiuk.dhfs.objects.repository.peersync.PeerSyncApiClientDynamic;
-import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.TransactionManager;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerAddress;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerDiscoveryDirectory;
+import com.usatiuk.dhfs.objects.repository.peersync.PeerInfo;
+import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService;
+import com.usatiuk.dhfs.objects.repository.peersync.api.PeerSyncApiClientDynamic;
+import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager;
 import com.usatiuk.dhfs.objects.repository.webapi.AvailablePeerInfo;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
 import io.quarkus.logging.Log;
-import io.quarkus.runtime.ShutdownEvent;
 import io.quarkus.runtime.StartupEvent;
 import io.quarkus.scheduler.Scheduled;
 import io.smallrye.common.annotation.Blocking;
@@ -12,57 +18,52 @@ import jakarta.annotation.Priority;
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.enterprise.event.Observes;
 import jakarta.inject.Inject;
-import lombok.Getter;
 import org.eclipse.microprofile.config.inject.ConfigProperty;
 
 import java.io.IOException;
-import java.security.cert.CertificateException;
 import java.util.*;
 import java.util.concurrent.*;
+import java.util.stream.Collectors;
 
 @ApplicationScoped
 public class PeerManager {
-    private final TransientPeersState _transientPeersState = new TransientPeersState();
-    private final ConcurrentMap<UUID, TransientPeerState> _seenButNotAdded = new ConcurrentHashMap<>();
+    private final ConcurrentMap<PeerId, PeerAddress> _states = new ConcurrentHashMap<>();
     // FIXME: Ideally not call them on every ping
     private final ArrayList<ConnectionEventListener> _connectedListeners = new ArrayList<>();
     private final ArrayList<ConnectionEventListener> _disconnectedListeners = new ArrayList<>();
     @Inject
     PersistentPeerDataService persistentPeerDataService;
     @Inject
-    SyncHandler syncHandler;
+    PeerInfoService peerInfoService;
     @Inject
     RpcClientFactory rpcClientFactory;
     @Inject
     PeerSyncApiClientDynamic peerSyncApiClient;
+    @Inject
+    TransactionManager transactionManager;
+    @Inject
+    Transaction curTx;
+    @Inject
+    PeerTrustManager peerTrustManager;
     @ConfigProperty(name = "dhfs.objects.sync.ping.timeout")
     long pingTimeout;
+    @Inject
+    PeerDiscoveryDirectory peerDiscoveryDirectory;
+    @Inject
+    SyncHandler syncHandler;
     private ExecutorService _heartbeatExecutor;
-    @Getter
-    private boolean _ready = false;
 
     // Note: keep priority updated with below
     void init(@Observes @Priority(600) StartupEvent event) throws IOException {
         _heartbeatExecutor = Executors.newVirtualThreadPerTaskExecutor();
-
-        // Note: newly added hosts aren't in _transientPeersState
-        // but that's ok as they don't have initialSyncDone set
-        for (var h : persistentPeerDataService.getHostUuids())
-            _transientPeersState.runWriteLocked(d -> d.get(h));
-
-        _ready = true;
-    }
-
-    void shutdown(@Observes @Priority(50) ShutdownEvent event) throws IOException {
-        _ready = false;
     }
 
     @Scheduled(every = "${dhfs.objects.reconnect_interval}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
     @Blocking
     public void tryConnectAll() {
-        if (!_ready) return;
+        if (_heartbeatExecutor == null) return;
         try {
-            _heartbeatExecutor.invokeAll(persistentPeerDataService.getHostUuids()
+            _heartbeatExecutor.invokeAll(peerInfoService.getPeersNoSelf()
                     .stream()
                     .<Callable<Void>>map(host -> () -> {
                         try {
@@ -70,8 +71,9 @@ public class PeerManager {
                                 Log.trace("Heartbeat: " + host);
                             else
                                 Log.debug("Trying to connect to " + host);
-                            if (pingCheck(host))
-                                handleConnectionSuccess(host);
+                            var bestAddr = selectBestAddress(host.id());
+                            if (pingCheck(host, bestAddr))
+                                handleConnectionSuccess(host, bestAddr);
                             else
                                 handleConnectionError(host);
                         } catch (Exception e) {
@@ -86,7 +88,6 @@ public class PeerManager {
 
     // Note: registrations should be completed with Priority < 600
     public void registerConnectEventListener(ConnectionEventListener listener) {
-        if (_ready) throw new IllegalStateException("Already initialized");
         synchronized (_connectedListeners) {
             _connectedListeners.add(listener);
         }
@@ -94,65 +95,48 @@ public class PeerManager {
 
     // Note: registrations should be completed with Priority < 600
     public void registerDisconnectEventListener(ConnectionEventListener listener) {
-        if (_ready) throw new IllegalStateException("Already initialized");
         synchronized (_disconnectedListeners) {
             _disconnectedListeners.add(listener);
         }
     }
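For reference, the fan-out shape tryConnectAll() uses: one virtual-thread task per peer, with invokeAll() blocking until every ping attempt has settled. A stand-alone sketch with made-up peers (assumes Java 21, where ExecutorService is AutoCloseable; invokeAll() throws InterruptedException):

    try (var ex = Executors.newVirtualThreadPerTaskExecutor()) {
        List<Callable<Void>> pings = List.of(
                () -> { System.out.println("ping peer-a"); return null; },
                () -> { System.out.println("ping peer-b"); return null; });
        ex.invokeAll(pings); // returns only when both attempts have completed
    }
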
Log.info("Connected to " + host); - for (var l : _connectedListeners) { - l.apply(host); - } +// for (var l : _connectedListeners) { +// l.apply(host); +// } } - public void handleConnectionError(UUID host) { + public void handleConnectionError(PeerInfo host) { boolean wasReachable = isReachable(host); if (wasReachable) Log.info("Lost connection to " + host); - _transientPeersState.runWriteLocked(d -> { - d.get(host).setReachable(false); - return null; - }); + _states.remove(host.id()); - for (var l : _disconnectedListeners) { - l.apply(host); - } +// for (var l : _disconnectedListeners) { +// l.apply(host); +// } } // FIXME: - private boolean pingCheck(UUID host) { - TransientPeerState state = _transientPeersState.runReadLocked(s -> s.getCopy(host)); - + private boolean pingCheck(PeerInfo host, PeerAddress address) { try { - return rpcClientFactory.withObjSyncClient(host.toString(), state.getAddr(), state.getSecurePort(), pingTimeout, c -> { - var ret = c.ping(PingRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); - if (!UUID.fromString(ret.getSelfUuid()).equals(host)) { - throw new IllegalStateException("Ping selfUuid returned " + ret.getSelfUuid() + " but expected " + host); - } + return rpcClientFactory.withObjSyncClient(host.id(), address, pingTimeout, (peer, c) -> { + c.ping(PingRequest.getDefaultInstance()); return true; }); } catch (Exception ignored) { @@ -161,109 +145,69 @@ public class PeerManager { } } - public boolean isReachable(UUID host) { - return _transientPeersState.runReadLocked(d -> d.get(host).isReachable()); + public boolean isReachable(PeerId host) { + return _states.containsKey(host); } - public TransientPeerState getTransientState(UUID host) { - return _transientPeersState.runReadLocked(d -> d.getCopy(host)); + public boolean isReachable(PeerInfo host) { + return isReachable(host.id()); } - public List getAvailableHosts() { - return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream() - .filter(e -> e.getValue().isReachable()) - .map(Map.Entry::getKey).toList()); + public PeerAddress getAddress(PeerId host) { + return _states.get(host); } - public List getUnavailableHosts() { - return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream() - .filter(e -> !e.getValue().isReachable()) - .map(Map.Entry::getKey).toList()); + public List getAvailableHosts() { + return _states.keySet().stream().toList(); } +// public List getUnavailableHosts() { +// return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream() +// .filter(e -> !e.getValue().isReachable()) +// .map(Map.Entry::getKey).toList()); +// } + public HostStateSnapshot getHostStateSnapshot() { - ArrayList available = new ArrayList<>(); - ArrayList unavailable = new ArrayList<>(); - _transientPeersState.runReadLocked(d -> { - for (var v : d.getStates().entrySet()) { - if (v.getValue().isReachable()) - available.add(v.getKey()); - else - unavailable.add(v.getKey()); - } - return null; - } - ); - return new HostStateSnapshot(available, unavailable); - } - - public void notifyAddr(UUID host, String addr, Integer port, Integer securePort) { - if (host.equals(persistentPeerDataService.getSelfUuid())) { - return; - } - - var state = new TransientPeerState(); - state.setAddr(addr); - state.setPort(port); - state.setSecurePort(securePort); - - if (!persistentPeerDataService.existsHost(host)) { - var prev = _seenButNotAdded.put(host, state); - // Needed for tests - if (prev == null) - Log.debug("Ignoring new address from 
unknown host " + ": addr=" + addr + " port=" + port); - return; - } else { - _seenButNotAdded.remove(host); - } - - _transientPeersState.runWriteLocked(d -> { -// Log.trace("Updating connection info for " + host + ": addr=" + addr + " port=" + port); - d.get(host).setAddr(addr); - d.get(host).setPort(port); - d.get(host).setSecurePort(securePort); - return null; + return transactionManager.run(() -> { + var partition = peerInfoService.getPeersNoSelf().stream().map(PeerInfo::id) + .collect(Collectors.partitioningBy(this::isReachable)); + return new HostStateSnapshot(partition.get(true), partition.get(false)); }); } - public void removeRemoteHost(UUID host) { - persistentPeerDataService.removeHost(host); - // Race? - _transientPeersState.runWriteLocked(d -> { - d.getStates().remove(host); - return null; + public void removeRemoteHost(PeerId peerId) { + transactionManager.run(() -> { + peerInfoService.removePeer(peerId); }); } - public void addRemoteHost(UUID host) { - if (!_seenButNotAdded.containsKey(host)) { - throw new IllegalStateException("Host " + host + " is not seen"); - } - if (persistentPeerDataService.existsHost(host)) { + private PeerAddress selectBestAddress(PeerId host) { + return peerDiscoveryDirectory.getForPeer(host).stream().findFirst().orElseThrow(); + } + + public void addRemoteHost(PeerId host) { + if (_states.containsKey(host)) { throw new IllegalStateException("Host " + host + " is already added"); } - var state = _seenButNotAdded.get(host); + transactionManager.run(() -> { + if (peerInfoService.getPeerInfo(host).isPresent()) + throw new IllegalStateException("Host " + host + " is already added"); - // FIXME: race? + var info = peerSyncApiClient.getSelfInfo(selectBestAddress(host)); - var info = peerSyncApiClient.getSelfInfo(state.getAddr(), state.getPort()); + var cert = Base64.getDecoder().decode(info.cert()); + peerInfoService.putPeer(host, cert); + }); - try { - persistentPeerDataService.addHost( - new PersistentPeerInfo(UUID.fromString(info.selfUuid()), - CertificateTools.certFromBytes(Base64.getDecoder().decode(info.cert())))); - Log.info("Added host: " + host.toString()); - } catch (CertificateException e) { - throw new RuntimeException(e); - } + peerTrustManager.reloadTrustManagerHosts(transactionManager.run(() -> peerInfoService.getPeers().stream().toList())); //FIXME: } public Collection getSeenButNotAddedHosts() { - return _seenButNotAdded.entrySet().stream() - .filter(e -> !persistentPeerDataService.existsHost(e.getKey())) - .map(e -> new AvailablePeerInfo(e.getKey().toString(), e.getValue().getAddr(), e.getValue().getPort())) - .toList(); + return transactionManager.run(() -> { + return peerDiscoveryDirectory.getReachablePeers().stream().filter(p -> !peerInfoService.getPeerInfo(p).isPresent()) + .map(p -> new AvailablePeerInfo(p.toString())).toList(); + }); } @FunctionalInterface @@ -271,7 +215,7 @@ public class PeerManager { void apply(UUID host); } - public record HostStateSnapshot(List available, List unavailable) { + public record HostStateSnapshot(Collection available, Collection unavailable) { } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java index 05f8f66f..cf3b3de3 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ 
-1,360 +1,151 @@ package com.usatiuk.dhfs.objects.repository; -import com.usatiuk.dhfs.SerializationHelper; import com.usatiuk.dhfs.ShutdownChecker; -import com.usatiuk.dhfs.objects.jrepository.*; -import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectory; -import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectoryLocal; -import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; +import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Nullable; import jakarta.annotation.Priority; import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; -import org.apache.commons.lang3.SerializationUtils; import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.HashTreePSet; +import java.io.File; import java.io.IOException; import java.nio.file.Files; -import java.nio.file.Paths; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.security.KeyPair; +import java.security.cert.CertificateEncodingException; import java.security.cert.X509Certificate; -import java.util.List; -import java.util.Objects; import java.util.Optional; import java.util.UUID; import java.util.concurrent.ExecutorService; -import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; - @ApplicationScoped public class PersistentPeerDataService { - final String dataFileName = "hosts"; - @ConfigProperty(name = "dhfs.objects.root") - String dataRoot; @Inject PeerTrustManager peerTrustManager; @Inject - JObjectManager jObjectManager; - @Inject ExecutorService executorService; @Inject - InvalidationQueueService invalidationQueueService; - @Inject RpcClientFactory rpcClientFactory; @Inject ShutdownChecker shutdownChecker; @Inject - JObjectTxManager jObjectTxManager; + TransactionManager jObjectTxManager; @Inject - SoftJObjectFactory softJObjectFactory; - SoftJObject peerDirectory; - SoftJObject peerDirectoryLocal; - private PersistentRemoteHosts _persistentData = new PersistentRemoteHosts(); - private UUID _selfUuid; + Transaction curTx; + @Inject + PeerInfoService peerInfoService; + @Inject + TransactionManager txm; + + @ConfigProperty(name = "dhfs.peerdiscovery.preset-uuid") + Optional presetUuid; + @ConfigProperty(name = "dhfs.objects.persistence.stuff.root") + String stuffRoot; + + private PeerId _selfUuid; + private X509Certificate _selfCertificate; + private KeyPair _selfKeyPair; void init(@Observes @Priority(300) StartupEvent event) throws IOException { - Paths.get(dataRoot).toFile().mkdirs(); - Log.info("Initializing with root " + dataRoot); - if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) { - Log.info("Reading hosts"); - _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName))); - } else if (Paths.get(dataRoot).resolve(dataFileName + ".bak").toFile().exists()) { - Log.warn("Reading hosts from backup"); - _persistentData = 
SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName))); - } - _selfUuid = _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfUuid); - - if (_persistentData.runReadLocked(d -> d.getSelfCertificate() == null)) { - jObjectTxManager.executeTxAndFlush(() -> { - _persistentData.runWriteLocked(d -> { - try { - Log.info("Generating a key pair, please wait"); - d.setSelfKeyPair(CertificateTools.generateKeyPair()); - d.setSelfCertificate(CertificateTools.generateCertificate(d.getSelfKeyPair(), _selfUuid.toString())); - } catch (Exception e) { - throw new RuntimeException("Failed generating cert", e); - } - return null; - }); - var newpd = new PeerDirectory(); - jObjectManager.put(new PersistentPeerInfo(_selfUuid, getSelfCertificate()), Optional.of(PeerDirectory.PeerDirectoryObjName)); - newpd.getPeers().add(_selfUuid); - jObjectManager.put(newpd, Optional.empty()); - jObjectManager.put(new PeerDirectoryLocal(), Optional.empty()); - }); - } - - peerDirectory = softJObjectFactory.create(PeerDirectory.class, PeerDirectory.PeerDirectoryObjName); - peerDirectoryLocal = softJObjectFactory.create(PeerDirectoryLocal.class, PeerDirectoryLocal.PeerDirectoryLocalObjName); - - if (!shutdownChecker.lastShutdownClean()) { - _persistentData.getData().getIrregularShutdownCounter().addAndGet(1); - jObjectTxManager.executeTxAndFlush(() -> { - peerDirectoryLocal.get().rwLock(); - peerDirectoryLocal.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + jObjectTxManager.run(() -> { + var selfData = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); + if (selfData != null) { + _selfUuid = selfData.selfUuid(); + _selfCertificate = selfData.selfCertificate(); + _selfKeyPair = selfData.selfKeyPair(); + return; + } else { try { - peerDirectoryLocal.get().getData().getInitialObjSyncDone().clear(); - peerDirectoryLocal.get().bumpVer(); - } finally { - peerDirectoryLocal.get().rwUnlock(); - } - }); - } + _selfUuid = presetUuid.map(PeerId::of).orElseGet(() -> PeerId.of(UUID.randomUUID().toString())); + Log.info("Generating a key pair, please wait"); + _selfKeyPair = CertificateTools.generateKeyPair(); + _selfCertificate = CertificateTools.generateCertificate(_selfKeyPair, _selfUuid.toString()); - jObjectManager.registerWriteListener(PersistentPeerInfo.class, this::pushPeerUpdates); - jObjectManager.registerWriteListener(PeerDirectory.class, this::pushPeerUpdates); - - // FIXME: Warn on failed resolves? 
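Worth noting: the removed fallback branch logged "Reading hosts from backup" but still deserialized the primary `hosts` file rather than `hosts.bak`, so the transactional bootstrap that replaces it also removes a latent bug. A stripped-down sketch of the new get-or-create startup path, where `SelfData` and `Tx` are hypothetical stand-ins for `PersistentRemoteHostsData` and the transaction API:

```java
// Sketch only: SelfData/Tx are stand-ins, not the project's real types.
import java.util.Optional;
import java.util.UUID;

record SelfData(String selfUuid) { /* certificate and key pair omitted */ }

interface Tx {
    Optional<SelfData> getSelfData();
    void put(SelfData data);
}

final class SelfDataBootstrap {
    // Runs under the caller's transaction: read-or-create is atomic, so two
    // concurrent startups cannot mint two different identities.
    static SelfData getOrCreate(Tx tx, Optional<String> presetUuid) {
        return tx.getSelfData().orElseGet(() -> {
            var created = new SelfData(presetUuid.orElseGet(() -> UUID.randomUUID().toString()));
            tx.put(created);
            return created;
        });
    }
}
```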
- peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - peerTrustManager.reloadTrustManagerHosts(getHosts()); - return null; - }); - - Files.writeString(Paths.get(dataRoot, "self_uuid"), _selfUuid.toString()); - Log.info("Self uuid is: " + _selfUuid.toString()); - writeData(); - } - - void shutdown(@Observes @Priority(300) ShutdownEvent event) throws IOException { - Log.info("Saving hosts"); - writeData(); - Log.info("Shutdown"); - } - - private void writeData() { - try { - if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) - Files.move(Paths.get(dataRoot).resolve(dataFileName), Paths.get(dataRoot).resolve(dataFileName + ".bak"), REPLACE_EXISTING); - Files.write(Paths.get(dataRoot).resolve(dataFileName), SerializationUtils.serialize(_persistentData)); - } catch (IOException iex) { - Log.error("Error writing persistent hosts data", iex); - throw new RuntimeException(iex); - } - } - - private void pushPeerUpdates() { - pushPeerUpdates(null); - } - - private void pushPeerUpdates(@Nullable JObject obj) { - if (obj != null) - Log.info("Scheduling certificate update after " + obj.getMeta().getName() + " was updated"); - executorService.submit(() -> { - updateCerts(); - invalidationQueueService.pushInvalidationToAll(PeerDirectory.PeerDirectoryObjName); - for (var p : peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList())) - invalidationQueueService.pushInvalidationToAll(PersistentPeerInfo.getNameFromUuid(p)); - }); - } - - private JObject getPeer(UUID uuid) { - var got = jObjectManager.get(PersistentPeerInfo.getNameFromUuid(uuid)).orElseThrow(() -> new IllegalStateException("Peer " + uuid + " not found")); - got.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - if (d == null) throw new IllegalStateException("Could not resolve peer " + uuid); - if (!(d instanceof PersistentPeerInfo)) - throw new IllegalStateException("Peer " + uuid + " is of wrong type!"); - return null; - }); - return (JObject) got; - } - - private List getPeersSnapshot() { - return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> d.getPeers().stream().map(u -> { - try { - return getPeer(u).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m2, d2) -> d2); - } catch (Exception e) { - Log.warn("Error making snapshot of peer " + u, e); - return null; - } - }).filter(Objects::nonNull).toList()); - } - - public UUID getSelfUuid() { - if (_selfUuid == null) - throw new IllegalStateException(); - else return _selfUuid; - } - - public String getUniqueId() { - String sb = String.valueOf(_selfUuid) + - _persistentData.getData().getIrregularShutdownCounter() + - "_" + - _persistentData.getData().getSelfCounter().addAndGet(1); - return sb; - } - - public PersistentPeerInfo getInfo(UUID name) { - return getPeer(name).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d); - } - - public List getHosts() { - return getPeersSnapshot().stream().filter(i -> !i.getUuid().equals(_selfUuid)).toList(); - } - - public List getHostUuids() { - return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().filter(i -> !i.equals(_selfUuid)).toList()); - } - - public List getHostUuidsAndSelf() { - return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList()); - } - - public List getHostsNoNulls() { - for (int i = 0; i < 5; 
i++) { - try { - return peerDirectory.get() - .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> d.getPeers().stream() - .map(u -> getPeer(u).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m2, d2) -> d2)) - .filter(e -> !e.getUuid().equals(_selfUuid)).toList()); - } catch (Exception e) { - Log.warn("Error when making snapshot of hosts: " + e.getMessage()); - try { - Thread.sleep(i * 2); - } catch (InterruptedException ignored) { + curTx.put(new PersistentRemoteHostsData(_selfUuid, _selfCertificate, _selfKeyPair, HashTreePSet.empty())); + peerInfoService.putPeer(_selfUuid, _selfCertificate.getEncoded()); + } catch (CertificateEncodingException e) { + throw new RuntimeException(e); } } - } - throw new StatusRuntimeException(Status.ABORTED.withDescription("Could not make a snapshot of peers in 5 tries!")); - } - - public boolean addHost(PersistentPeerInfo persistentPeerInfo) { - return jObjectTxManager.executeTx(() -> { - if (persistentPeerInfo.getUuid().equals(_selfUuid)) return false; - - boolean added = peerDirectory.get().runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { - boolean addedInner = d.getPeers().add(persistentPeerInfo.getUuid()); - if (addedInner) { - jObjectManager.put(persistentPeerInfo, Optional.of(m.getName())); - b.apply(); - } - return addedInner; - }); - return added; }); + peerTrustManager.reloadTrustManagerHosts(peerInfoService.getPeers()); + Log.info("Self uuid is: " + _selfUuid.toString()); + new File(stuffRoot).mkdirs(); + Files.write(Path.of(stuffRoot, "self_uuid"), _selfUuid.id().toString().getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING); } - public boolean removeHost(UUID host) { - return jObjectTxManager.executeTx(() -> { - boolean removed = peerDirectory.get().runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { - boolean removedInner = d.getPeers().remove(host); - Log.info("Removing host: " + host + (removedInner ? " removed" : " did not exists")); - if (removedInner) { - peerDirectoryLocal.get().rwLock(); - peerDirectoryLocal.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - try { - peerDirectoryLocal.get().getData().getInitialObjSyncDone().remove(host); - peerDirectoryLocal.get().getData().getInitialOpSyncDone().remove(host); - peerDirectoryLocal.get().bumpVer(); - } finally { - peerDirectoryLocal.get().rwUnlock(); - } - getPeer(host).runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mp, dp, bp, vp) -> { - mp.removeRef(m.getName()); - return null; - }); - b.apply(); - } - return removedInner; - }); - return removed; - }); +// private void pushPeerUpdates() { +// pushPeerUpdates(null); +// } + +// private void pushPeerUpdates(@Nullable JObject obj) { +// if (obj != null) +// Log.info("Scheduling certificate update after " + obj.getMeta().getName() + " was updated"); +// executorService.submit(() -> { +// updateCerts(); +// invalidationQueueService.pushInvalidationToAll(PeerDirectory.PeerDirectoryObjName); +// for (var p : peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList())) +// invalidationQueueService.pushInvalidationToAll(PersistentPeerInfo.getNameFromUuid(p)); +// }); +// } + + public PeerId getSelfUuid() { + return _selfUuid; } - private void updateCerts() { - try { - peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - peerTrustManager.reloadTrustManagerHosts(getHostsNoNulls()); - // Fixme:? 
I don't think it should be needed with custom trust store - // but it doesn't work? - rpcClientFactory.dropCache(); - return null; - }); - } catch (Exception ex) { - Log.warn("Error when refreshing certificates, will retry: " + ex.getMessage()); - pushPeerUpdates(); - } - } - - public boolean existsHost(UUID uuid) { - return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().contains(uuid)); - } - - public PersistentPeerInfo getHost(UUID uuid) { - if (!existsHost(uuid)) - throw new StatusRuntimeException(Status.NOT_FOUND); - return getPeer(uuid).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d); - } +// private void updateCerts() { +// try { +// peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { +// peerTrustManager.reloadTrustManagerHosts(getHostsNoNulls()); +// // Fixme:? I don't think it should be needed with custom trust store +// // but it doesn't work? +// rpcClientFactory.dropCache(); +// return null; +// }); +// } catch (Exception ex) { +// Log.warn("Error when refreshing certificates, will retry: " + ex.getMessage()); +// pushPeerUpdates(); +// } +// } public KeyPair getSelfKeypair() { - return _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfKeyPair); + return _selfKeyPair; } public X509Certificate getSelfCertificate() { - return _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfCertificate); + return _selfCertificate; } // Returns true if host's initial sync wasn't done before, and marks it as done - public boolean markInitialOpSyncDone(UUID connectedHost) { - return jObjectTxManager.executeTx(() -> { - peerDirectoryLocal.get().rwLock(); - try { - peerDirectoryLocal.get().local(); - boolean contained = peerDirectoryLocal.get().getData().getInitialOpSyncDone().contains(connectedHost); - - if (!contained) - peerDirectoryLocal.get().local().mutate(new JMutator() { - @Override - public boolean mutate(PeerDirectoryLocal object) { - object.getInitialOpSyncDone().add(connectedHost); - return true; - } - - @Override - public void revert(PeerDirectoryLocal object) { - object.getInitialOpSyncDone().remove(connectedHost); - } - }); - return !contained; - } finally { - peerDirectoryLocal.get().rwUnlock(); - } + public boolean markInitialSyncDone(PeerId peerId) { + return txm.run(() -> { + var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); + if (data == null) throw new IllegalStateException("Self data not found"); + boolean exists = data.initialSyncDone().contains(peerId); + if (exists) return false; + curTx.put(data.withInitialSyncDone(data.initialSyncDone().plus(peerId))); + return true; }); } - public boolean markInitialObjSyncDone(UUID connectedHost) { - return jObjectTxManager.executeTx(() -> { - peerDirectoryLocal.get().rwLock(); - try { - peerDirectoryLocal.get().local(); - boolean contained = peerDirectoryLocal.get().getData().getInitialObjSyncDone().contains(connectedHost); - - if (!contained) - peerDirectoryLocal.get().local().mutate(new JMutator() { - @Override - public boolean mutate(PeerDirectoryLocal object) { - object.getInitialObjSyncDone().add(connectedHost); - return true; - } - - @Override - public void revert(PeerDirectoryLocal object) { - object.getInitialObjSyncDone().remove(connectedHost); - } - }); - return !contained; - } finally { - peerDirectoryLocal.get().rwUnlock(); - } + // Returns true if it was marked as done before, and resets it + public boolean 
resetInitialSyncDone(PeerId peerId) {
+       return txm.run(() -> {
+           var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null);
+           if (data == null) throw new IllegalStateException("Self data not found");
+           boolean exists = data.initialSyncDone().contains(peerId);
+           if (!exists) return false;
+           curTx.put(data.withInitialSyncDone(data.initialSyncDone().minus(peerId)));
+           return true;
        });
    }

diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java
index a6b0c8f3..5e820e18 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java
@@ -1,29 +1,35 @@
package com.usatiuk.dhfs.objects.repository;

-import lombok.Getter;
-import lombok.Setter;
+import com.usatiuk.dhfs.objects.JData;
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
+import org.pcollections.PSet;

-import java.io.Serial;
import java.io.Serializable;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;

-public class PersistentRemoteHostsData implements Serializable {
-   @Serial
-   private static final long serialVersionUID = 1L;
+public record PersistentRemoteHostsData(PeerId selfUuid,
+                                        X509Certificate selfCertificate,
+                                        KeyPair selfKeyPair,
+                                        PSet<PeerId> initialSyncDone) implements JData, Serializable {
+   public static final JObjectKey KEY = JObjectKey.of("self_peer_data");

-   @Getter
-   private final UUID _selfUuid = UUID.randomUUID();
-   @Getter
-   private final AtomicLong _selfCounter = new AtomicLong();
-   @Getter
-   private final AtomicLong _irregularShutdownCounter = new AtomicLong();
-   @Getter
-   @Setter
-   private X509Certificate _selfCertificate = null;
-   @Getter
-   @Setter
-   private KeyPair _selfKeyPair = null;
+   @Override
+   public JObjectKey key() {
+       return KEY;
+   }
+
+   public PersistentRemoteHostsData withInitialSyncDone(PSet<PeerId> initialSyncDone) {
+       return new PersistentRemoteHostsData(selfUuid, selfCertificate, selfKeyPair, initialSyncDone);
+   }
+
+   @Override
+   public String toString() {
+       return "PersistentRemoteHostsData{" +
+               "selfUuid=" + selfUuid +
+               ", initialSyncDone=" + initialSyncDone +
+               '}';
+   }
}

diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java
index a9a277c4..56135ef5 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java
@@ -1,11 +1,11 @@
package com.usatiuk.dhfs.objects.repository;

-import com.google.common.collect.Maps;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jrepository.*;
-import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
+import com.usatiuk.dhfs.objects.*;
+import com.usatiuk.dhfs.objects.persistence.JObjectKeyP;
import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
-import com.usatiuk.dhfs.objects.repository.opsupport.Op;
+import com.usatiuk.dhfs.objects.repository.invalidation.Op;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
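The `markInitialSyncDone`/`resetInitialSyncDone` pair above toggles a flag held in an immutable pcollections `PSet` on a record, copy-on-write. A runnable sketch of the same pattern, with `String` standing in for `PeerId` and a plain field standing in for `curTx.put`:

```java
// Sketch only: the field replaces the transactional record store.
import org.pcollections.HashTreePSet;
import org.pcollections.PSet;

final class SyncFlagSketch {
    // Immutable snapshot, replaced wholesale on every change.
    private PSet<String> initialSyncDone = HashTreePSet.empty();

    // Mirrors markInitialSyncDone: true iff the peer wasn't marked before.
    boolean mark(String peer) {
        if (initialSyncDone.contains(peer)) return false;
        initialSyncDone = initialSyncDone.plus(peer);
        return true;
    }

    // Mirrors resetInitialSyncDone: true iff the peer was marked before.
    boolean reset(String peer) {
        if (!initialSyncDone.contains(peer)) return false;
        initialSyncDone = initialSyncDone.minus(peer);
        return true;
    }

    public static void main(String[] args) {
        var flags = new SyncFlagSketch();
        assert flags.mark("peer-a");   // first mark triggers initial sync
        assert !flags.mark("peer-a");  // idempotent afterwards
        assert flags.reset("peer-a");  // reset re-arms it
    }
}
```

Because `plus`/`minus` return new sets, the record "wither" (`withInitialSyncDone`) composes naturally with the transaction's put, and a rolled-back transaction never leaves a half-updated flag behind.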
import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.logging.Log; @@ -13,11 +13,14 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; -import javax.annotation.Nullable; -import java.util.*; +import java.util.Collection; +import java.util.List; +import java.util.Map; import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.function.Function; import java.util.stream.Collectors; @ApplicationScoped @@ -29,146 +32,142 @@ public class RemoteObjectServiceClient { RpcClientFactory rpcClientFactory; @Inject - JObjectManager jObjectManager; + TransactionManager txm; + @Inject + Transaction curTx; + @Inject + RemoteTransaction remoteTx; @Inject SyncHandler syncHandler; @Inject InvalidationQueueService invalidationQueueService; @Inject - ProtoSerializer dataProtoSerializer; + ProtoSerializer opProtoSerializer; @Inject - ProtoSerializer opProtoSerializer; - @Inject - JObjectTxManager jObjectTxManager; + ProtoSerializer receivedObjectProtoSerializer; - public Pair getSpecificObject(UUID host, String name) { - return rpcClientFactory.withObjSyncClient(host, client -> { - var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(name).build()); - return Pair.of(reply.getObject().getHeader(), reply.getObject().getContent()); - }); - } + private final ExecutorService _batchExecutor = Executors.newVirtualThreadPerTaskExecutor(); - public JObjectDataP getObject(JObject jObject) { - jObject.assertRwLock(); +// public Pair getSpecificObject(UUID host, String name) { +// return rpcClientFactory.withObjSyncClient(host, client -> { +// var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(name).build()); +// return Pair.of(reply.getObject().getHeader(), reply.getObject().getContent()); +// }); +// } - var targets = jObject.runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d) -> { - var ourVersion = md.getOurVersion(); - if (ourVersion >= 1) - return md.getRemoteCopies().entrySet().stream() - .filter(entry -> entry.getValue().equals(ourVersion)) - .map(Map.Entry::getKey).toList(); - else - return persistentPeerDataService.getHostUuids(); - }); + public void getObject(JObjectKey key, Function, Boolean> onReceive) { + var objMeta = remoteTx.getMeta(key).orElse(null); + + if (objMeta == null) { + throw new IllegalArgumentException("Object " + key + " not found"); + } + + var targetVersion = objMeta.versionSum(); + var targets = objMeta.knownRemoteVersions().entrySet().stream() + .filter(entry -> entry.getValue().equals(targetVersion)) + .map(Map.Entry::getKey).toList(); if (targets.isEmpty()) - throw new IllegalStateException("No targets for object " + jObject.getMeta().getName()); + throw new IllegalStateException("No targets for object " + key); - Log.info("Downloading object " + jObject.getMeta().getName() + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); + Log.info("Downloading object " + key + " from " + targets); - return rpcClientFactory.withObjSyncClient(targets, client -> { - var reply = 
client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(jObject.getMeta().getName()).build()); + rpcClientFactory.withObjSyncClient(targets, (peer, client) -> { + var reply = client.getObject(GetObjectRequest.newBuilder().setName(JObjectKeyP.newBuilder().setName(key.toString()).build()).build()); - var receivedMap = new HashMap(); - for (var e : reply.getObject().getHeader().getChangelog().getEntriesList()) { - receivedMap.put(UUID.fromString(e.getHost()), e.getVersion()); + var deserialized = receivedObjectProtoSerializer.deserialize(reply); + + if (!onReceive.apply(Pair.of(peer, deserialized))) { + throw new StatusRuntimeException(Status.ABORTED.withDescription("Failed to process object " + key + " from " + peer)); } - return jObjectTxManager.executeTx(() -> { - return jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d, b, v) -> { - var unexpected = !Objects.equals( - Maps.filterValues(md.getChangelog(), val -> val != 0), - Maps.filterValues(receivedMap, val -> val != 0)); - - if (unexpected) { - try { - syncHandler.handleOneUpdate(UUID.fromString(reply.getSelfUuid()), reply.getObject().getHeader()); - } catch (SyncHandler.OutdatedUpdateException ignored) { - Log.info("Outdated update of " + md.getName() + " from " + reply.getSelfUuid()); - invalidationQueueService.pushInvalidationToOne(UUID.fromString(reply.getSelfUuid()), md.getName()); // True? - throw new StatusRuntimeException(Status.ABORTED.withDescription("Received outdated object version")); - } catch (Exception e) { - Log.error("Received unexpected object version from " + reply.getSelfUuid() - + " for " + reply.getObject().getHeader().getName() + " and conflict resolution failed", e); - throw new StatusRuntimeException(Status.ABORTED.withDescription("Received unexpected object version")); - } - } - - return reply.getObject().getContent(); - }); - }); + return null; +// return jObjectTxManager.executeTx(() -> { +// return key.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d, b, v) -> { +// var unexpected = !Objects.equals( +// Maps.filterValues(md.getChangelog(), val -> val != 0), +// Maps.filterValues(receivedMap, val -> val != 0)); +// +// if (unexpected) { +// try { +// syncHandler.handleOneUpdate(UUID.fromString(reply.getSelfUuid()), reply.getObject().getHeader()); +// } catch (SyncHandler.OutdatedUpdateException ignored) { +// Log.info("Outdated update of " + md.getName() + " from " + reply.getSelfUuid()); +// invalidationQueueService.pushInvalidationToOne(UUID.fromString(reply.getSelfUuid()), md.getName()); // True? +// throw new StatusRuntimeException(Status.ABORTED.withDescription("Received outdated object version")); +// } catch (Exception e) { +// Log.error("Received unexpected object version from " + reply.getSelfUuid() +// + " for " + reply.getObject().getHeader().getName() + " and conflict resolution failed", e); +// throw new StatusRuntimeException(Status.ABORTED.withDescription("Received unexpected object version")); +// } +// } +// +// return reply.getObject().getContent(); +// }); +// }); }); } - @Nullable - public IndexUpdateReply notifyUpdate(JObject obj, UUID host) { - var builder = IndexUpdatePush.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); - - var header = obj - .runReadLocked( - obj.getMeta().getKnownClass().isAnnotationPresent(PushResolution.class) - ? 
JObjectManager.ResolutionStrategy.LOCAL_ONLY - : JObjectManager.ResolutionStrategy.NO_RESOLUTION, - (m, d) -> { - if (obj.getMeta().isDeleted()) return null; - if (m.getKnownClass().isAnnotationPresent(PushResolution.class) && d == null) - Log.warn("Object " + m.getName() + " is marked as PushResolution but no resolution found"); - if (m.getKnownClass().isAnnotationPresent(PushResolution.class)) - return m.toRpcHeader(dataProtoSerializer.serialize(d)); - else - return m.toRpcHeader(); - }); - if (header == null) return null; - jObjectTxManager.executeTx(obj::markSeen); - builder.setHeader(header); - - var send = builder.build(); - - return rpcClientFactory.withObjSyncClient(host, client -> client.indexUpdate(send)); - } - - public OpPushReply pushOps(List ops, String queueName, UUID host) { + // @Nullable +// public IndexUpdateReply notifyUpdate(JObject obj, UUID host) { +// var builder = IndexUpdatePush.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); +// +// var header = obj +// .runReadLocked( +// obj.getMeta().getKnownClass().isAnnotationPresent(PushResolution.class) +// ? JObjectManager.ResolutionStrategy.LOCAL_ONLY +// : JObjectManager.ResolutionStrategy.NO_RESOLUTION, +// (m, d) -> { +// if (obj.getMeta().isDeleted()) return null; +// if (m.getKnownClass().isAnnotationPresent(PushResolution.class) && d == null) +// Log.warn("Object " + m.getName() + " is marked as PushResolution but no resolution found"); +// if (m.getKnownClass().isAnnotationPresent(PushResolution.class)) +// return m.toRpcHeader(dataProtoSerializer.serialize(d)); +// else +// return m.toRpcHeader(); +// }); +// if (header == null) return null; +// jObjectTxManager.executeTx(obj::markSeen); +// builder.setHeader(header); +// +// var send = builder.build(); +// +// return rpcClientFactory.withObjSyncClient(host, client -> client.indexUpdate(send)); +// } +// + public OpPushReply pushOps(PeerId target, List ops) { for (Op op : ops) { - for (var ref : op.getEscapedRefs()) { - jObjectTxManager.executeTx(() -> { - jObjectManager.get(ref).ifPresent(JObject::markSeen); - }); - } + txm.run(() -> { + for (var ref : op.getEscapedRefs()) { + curTx.get(RemoteObjectMeta.class, ref).map(m -> m.withSeen(true)).ifPresent(curTx::put); + } + }); + var serialized = opProtoSerializer.serialize(op); + var built = OpPushRequest.newBuilder().addMsg(serialized).build(); + rpcClientFactory.withObjSyncClient(target, (tgt, client) -> client.opPush(built)); } - var builder = OpPushMsg.newBuilder() - .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) - .setQueueId(queueName); - for (var op : ops) - builder.addMsg(opProtoSerializer.serialize(op)); - return rpcClientFactory.withObjSyncClient(host, client -> client.opPush(builder.build())); + return OpPushReply.getDefaultInstance(); } - public Collection canDelete(Collection targets, String object, Collection ourReferrers) { - ConcurrentLinkedDeque results = new ConcurrentLinkedDeque<>(); - Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); - try (var executor = Executors.newVirtualThreadPerTaskExecutor()) { - try { - executor.invokeAll(targets.stream().>map(h -> () -> { - try { - var req = CanDeleteRequest.newBuilder() - .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) - .setName(object); - req.addAllOurReferrers(ourReferrers); - var res = rpcClientFactory.withObjSyncClient(h, client -> client.canDelete(req.build())); - if (res != null) - results.add(res); - } 
catch (Exception e) { - Log.debug("Error when asking canDelete for object " + object, e); - } - return null; - }).toList()); - } catch (InterruptedException e) { - Log.warn("Interrupted waiting for canDelete for object " + object); - } - if (!executor.shutdownNow().isEmpty()) - Log.warn("Didn't ask all targets when asking canDelete for " + object); + public Collection> canDelete(Collection targets, JObjectKey objKey, Collection ourReferrers) { + Log.trace("Asking canDelete for " + objKey + " from " + targets.stream().map(PeerId::toString).collect(Collectors.joining(", "))); + try { + return _batchExecutor.invokeAll(targets.stream().>>map(h -> () -> { + var req = CanDeleteRequest.newBuilder().setName(JObjectKeyP.newBuilder().setName(objKey.toString()).build()); + for (var ref : ourReferrers) { + req.addOurReferrers(JObjectKeyP.newBuilder().setName(ref.toString()).build()); + } + return Pair.of(h, rpcClientFactory.withObjSyncClient(h, (p, client) -> client.canDelete(req.build()))); + }).toList()).stream().map(f -> { + try { + return f.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + }).toList(); + } catch (InterruptedException e) { + throw new RuntimeException(e); } - return results; } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java index fde49ecb..466df67d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java @@ -1,172 +1,172 @@ package com.usatiuk.dhfs.objects.repository; import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor; +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.persistence.JObjectKeyP; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -import com.usatiuk.dhfs.objects.repository.opsupport.Op; -import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; -import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace; +import com.usatiuk.dhfs.objects.repository.invalidation.Op; +import com.usatiuk.dhfs.objects.repository.invalidation.OpHandler; +import com.usatiuk.dhfs.objects.transaction.Transaction; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.grpc.GrpcService; import io.quarkus.logging.Log; +import io.quarkus.security.identity.SecurityIdentity; import io.smallrye.common.annotation.Blocking; import io.smallrye.mutiny.Uni; import jakarta.annotation.security.RolesAllowed; import jakarta.inject.Inject; - -import java.util.UUID; +import org.apache.commons.lang3.tuple.Pair; // Note: RunOnVirtualThread hangs somehow @GrpcService @RolesAllowed("cluster-member") public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { - @Inject - SyncHandler syncHandler; +// @Inject +// SyncHandler syncHandler; @Inject - JObjectManager jObjectManager; - + TransactionManager txm; @Inject - PeerManager remoteHostManager; - + 
PeerManager peerManager; @Inject - AutoSyncProcessor autoSyncProcessor; - + Transaction curTx; @Inject PersistentPeerDataService persistentPeerDataService; @Inject InvalidationQueueService invalidationQueueService; - @Inject - ProtoSerializer dataProtoSerializer; + SecurityIdentity identity; @Inject - ProtoSerializer opProtoSerializer; - + ProtoSerializer opProtoSerializer; @Inject - OpObjectRegistry opObjectRegistry; - + ProtoSerializer receivedObjectProtoSerializer; @Inject - JObjectTxManager jObjectTxManager; + RemoteTransaction remoteTx; + @Inject + OpHandler opHandler; @Override @Blocking public Uni getObject(GetObjectRequest request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) - throw new StatusRuntimeException(Status.UNAUTHENTICATED); + Log.info("<-- getObject: " + request.getName() + " from " + identity.getPrincipal().getName().substring(3)); - Log.info("<-- getObject: " + request.getName() + " from " + request.getSelfUuid()); - - var obj = jObjectManager.get(request.getName()).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); - - // Does @Blocking break this? - return Uni.createFrom().emitter(emitter -> { - var replyObj = jObjectTxManager.executeTx(() -> { - // Obj.markSeen before markSeen of its children - obj.markSeen(); - return obj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (meta, data) -> { - if (meta.isOnlyLocal()) - throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Trying to get local-only object")); - if (data == null) { - Log.info("<-- getObject FAIL: " + request.getName() + " from " + request.getSelfUuid()); - throw new StatusRuntimeException(Status.ABORTED.withDescription("Not available locally")); - } - data.extractRefs().forEach(ref -> - jObjectManager.get(ref) - .orElseThrow(() -> new IllegalStateException("Non-hydrated refs for local object?")) - .markSeen()); - - return ApiObject.newBuilder() - .setHeader(obj.getMeta().toRpcHeader()) - .setContent(dataProtoSerializer.serialize(obj.getData())).build(); - }); - }); - var ret = GetObjectReply.newBuilder() - .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) - .setObject(replyObj).build(); - // TODO: Could this cause problems if we wait for too long? 
- obj.commitFenceAsync(() -> emitter.complete(ret)); + Pair got = txm.run(() -> { + var meta = remoteTx.getMeta(JObjectKey.of(request.getName().getName())).orElse(null); + var obj = remoteTx.getDataLocal(JDataRemote.class, JObjectKey.of(request.getName().getName())).orElse(null); + if (meta != null && !meta.seen()) + curTx.put(meta.withSeen(true)); + if (obj != null) + for (var ref : obj.collectRefsTo()) { + var refMeta = remoteTx.getMeta(ref).orElse(null); + if (refMeta != null && !refMeta.seen()) + curTx.put(refMeta.withSeen(true)); + } + return Pair.of(meta, obj); }); + + if ((got.getValue() != null) && (got.getKey() == null)) { + Log.error("Inconsistent state for object meta: " + request.getName()); + throw new StatusRuntimeException(Status.INTERNAL); + } + + if (got.getValue() == null) { + Log.info("<-- getObject NOT FOUND: " + request.getName() + " from " + identity.getPrincipal().getName().substring(3)); + throw new StatusRuntimeException(Status.NOT_FOUND); + } + + var serialized = receivedObjectProtoSerializer.serialize(new ReceivedObject(got.getKey().changelog(), got.getValue())); + return Uni.createFrom().item(serialized); +// // Does @Blocking break this? +// return Uni.createFrom().emitter(emitter -> { +// try { +// } catch (Exception e) { +// emitter.fail(e); +// } +// var replyObj = txm.run(() -> { +// var cur = curTx.get(JDataRemote.class, JObjectKey.of(request.getName())).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); +// // Obj.markSeen before markSeen of its children +// obj.markSeen(); +// return obj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (meta, data) -> { +// if (meta.isOnlyLocal()) +// throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Trying to get local-only object")); +// if (data == null) { +// Log.info("<-- getObject FAIL: " + request.getName() + " from " + request.getSelfUuid()); +// throw new StatusRuntimeException(Status.ABORTED.withDescription("Not available locally")); +// } +// data.extractRefs().forEach(ref -> +// jObjectManager.get(ref) +// .orElseThrow(() -> new IllegalStateException("Non-hydrated refs for local object?")) +// .markSeen()); +// +// return ApiObject.newBuilder() +// .setHeader(obj.getMeta().toRpcHeader()) +// .setContent(dataProtoSerializer.serialize(obj.getData())).build(); +// }); +// }); +// var ret = GetObjectReply.newBuilder() +// .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) +// .setObject(replyObj).build(); +// emitter.complete(ret); +// // TODO: Could this cause problems if we wait for too long? 
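The rewritten `getObject` above marks metadata as seen by rewriting immutable records (`withSeen(true)`) inside the transaction, for the object itself and each of its outgoing references. A toy sketch of that copy-on-write marking; `Meta` and the map-backed store are hypothetical stand-ins for `RemoteObjectMeta` and `curTx`:

```java
// Sketch only: Meta and the HashMap stand in for the real meta/transaction types.
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

record Meta(String key, boolean seen) {
    Meta withSeen(boolean s) { return new Meta(key, s); }
}

final class SeenMarking {
    private final Map<String, Meta> store = new HashMap<>(); // stands in for curTx

    Optional<Meta> get(String key) { return Optional.ofNullable(store.get(key)); }

    void put(Meta m) { store.put(m.key(), m); }

    // Mark the object and each outgoing reference as seen, skipping writes
    // when the flag is already set (avoids needless dirty records).
    void markSeenWithRefs(String key, Iterable<String> refs) {
        get(key).filter(m -> !m.seen()).ifPresent(m -> put(m.withSeen(true)));
        for (var ref : refs)
            get(ref).filter(m -> !m.seen()).ifPresent(m -> put(m.withSeen(true)));
    }
}
```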
+//// obj.commitFenceAsync(() -> emitter.complete(ret)); +// }); } @Override @Blocking public Uni canDelete(CanDeleteRequest request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) - throw new StatusRuntimeException(Status.UNAUTHENTICATED); + var peerId = identity.getPrincipal().getName().substring(3); - Log.info("<-- canDelete: " + request.getName() + " from " + request.getSelfUuid()); + Log.info("<-- canDelete: " + request.getName() + " from " + peerId); var builder = CanDeleteReply.newBuilder(); - var obj = jObjectManager.get(request.getName()); + txm.run(() -> { + var obj = curTx.get(RemoteObjectMeta.class, JObjectKey.of(request.getName().getName())).orElse(null); - builder.setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); - builder.setObjName(request.getName()); + if (obj == null) { + builder.setDeletionCandidate(true); + return; + } - if (obj.isPresent()) try { - boolean tryUpdate = obj.get().runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d) -> { - if (m.isDeleted() && !m.isDeletionCandidate()) - throw new IllegalStateException("Object " + m.getName() + " is deleted but not a deletion candidate"); - builder.setDeletionCandidate(m.isDeletionCandidate()); - builder.addAllReferrers(m.getReferrers()); - return m.isDeletionCandidate() && !m.isDeleted(); - }); - // FIXME -// if (tryUpdate) { -// obj.get().runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { -// return null; -// }); -// } - } catch (DeletedObjectAccessException dox) { - builder.setDeletionCandidate(true); - } - else { - builder.setDeletionCandidate(true); - } + builder.setDeletionCandidate(!obj.frozen() && obj.refsFrom().isEmpty()); - var ret = builder.build(); + if (!builder.getDeletionCandidate()) + for (var r : obj.refsFrom()) + builder.addReferrers(JObjectKeyP.newBuilder().setName(r.toString()).build()); - if (!ret.getDeletionCandidate()) - for (var rr : request.getOurReferrersList()) - autoSyncProcessor.add(rr); - - return Uni.createFrom().item(ret); - } - - @Override - @Blocking - public Uni indexUpdate(IndexUpdatePush request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) - throw new StatusRuntimeException(Status.UNAUTHENTICATED); - -// Log.info("<-- indexUpdate: " + request.getHeader().getName()); - return jObjectTxManager.executeTxAndFlush(() -> { - return Uni.createFrom().item(syncHandler.handleRemoteUpdate(request)); +// if (!ret.getDeletionCandidate()) +// for (var rr : request.getOurReferrersList()) +// autoSyncProcessor.add(rr); }); + return Uni.createFrom().item(builder.build()); } + // @Override +// @Blocking +// public Uni indexUpdate(IndexUpdatePush request) { +// if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); +// if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) +// throw new StatusRuntimeException(Status.UNAUTHENTICATED); +// +// Log.info("<-- indexUpdate: " + request.getHeader().getName()); +// return jObjectTxManager.executeTxAndFlush(() -> { +// return Uni.createFrom().item(syncHandler.handleRemoteUpdate(request)); +// }); +// } @Override @Blocking - public Uni opPush(OpPushMsg request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - 
if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) - throw new StatusRuntimeException(Status.UNAUTHENTICATED); - + public Uni opPush(OpPushRequest request) { try { - var objs = request.getMsgList().stream().map(opProtoSerializer::deserialize).toList(); - jObjectTxManager.executeTxAndFlush(() -> { - opObjectRegistry.acceptExternalOps(request.getQueueId(), UUID.fromString(request.getSelfUuid()), objs); - }); + var ops = request.getMsgList().stream().map(opProtoSerializer::deserialize).toList(); + for (var op : ops) { + Log.info("<-- op: " + op + " from " + identity.getPrincipal().getName().substring(3)); + txm.run(() -> { + opHandler.handleOp(PeerId.of(identity.getPrincipal().getName().substring(3)), op); + }); + } } catch (Exception e) { Log.error(e, e); throw e; @@ -174,11 +174,10 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { return Uni.createFrom().item(OpPushReply.getDefaultInstance()); } + @Override @Blocking public Uni ping(PingRequest request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - return Uni.createFrom().item(PingReply.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); + return Uni.createFrom().item(PingReply.getDefaultInstance()); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java index 3239ec7d..a985be13 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java @@ -1,35 +1,25 @@ package com.usatiuk.dhfs.objects.repository; +import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager; import io.grpc.ChannelCredentials; import io.grpc.ManagedChannel; import io.grpc.TlsChannelCredentials; import io.grpc.netty.NettyChannelBuilder; -import io.quarkus.runtime.ShutdownEvent; -import jakarta.annotation.Priority; import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; import javax.net.ssl.KeyManagerFactory; import java.security.KeyStore; import java.security.cert.Certificate; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; -//FIXME: Leaks! 
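The new `opPush` above runs one transaction per received op instead of one batch transaction per message. A sketch of that loop shape, where `Op`, `PeerId`, and both functional parameters are stand-ins for the real `TransactionManager.run` and `OpHandler.handleOp`:

```java
// Sketch only: all type parameters and the two functional fields are stand-ins.
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

final class OpApplier<PeerId, Op> {
    private final Consumer<Runnable> txRunner;    // stands in for TransactionManager.run
    private final BiConsumer<PeerId, Op> handler; // stands in for OpHandler.handleOp

    OpApplier(Consumer<Runnable> txRunner, BiConsumer<PeerId, Op> handler) {
        this.txRunner = txRunner;
        this.handler = handler;
    }

    void apply(PeerId from, List<Op> ops) {
        for (var op : ops) {
            // One transaction per op: a failure aborts only the op it belongs to.
            txRunner.accept(() -> handler.accept(from, op));
        }
    }
}
```

The trade-off is more commits per push, in exchange for failure isolation between ops and smaller transactions.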
@ApplicationScoped
public class RpcChannelFactory {
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    PeerTrustManager peerTrustManager;
-   private ConcurrentMap<SecureChannelKey, ManagedChannel> _secureChannelCache = new ConcurrentHashMap<>();
-
-   void shutdown(@Observes @Priority(100000) ShutdownEvent event) {
-       for (var c : _secureChannelCache.values()) c.shutdownNow();
-   }

    private ChannelCredentials getChannelCredentials() {
        try {
@@ -48,22 +38,7 @@ public class RpcChannelFactory {
        }
    }

-   ManagedChannel getSecureChannel(String host, String address, int port) {
-       var key = new SecureChannelKey(host, address, port);
-       return _secureChannelCache.computeIfAbsent(key, (k) -> {
-           return NettyChannelBuilder.forAddress(address, port, getChannelCredentials()).overrideAuthority(host).idleTimeout(10, TimeUnit.SECONDS).build();
-       });
-   }
-
-   public void dropCache() {
-       var oldS = _secureChannelCache;
-       _secureChannelCache = new ConcurrentHashMap<>();
-       oldS.values().forEach(ManagedChannel::shutdown);
-   }
-
-   private record SecureChannelKey(String host, String address, int port) {
-   }
-
-   private record InsecureChannelKey(String address, int port) {
+   ManagedChannel getSecureChannel(PeerId host, String address, int port) {
+       return NettyChannelBuilder.forAddress(address, port, getChannelCredentials()).overrideAuthority(host.toString()).idleTimeout(10, TimeUnit.SECONDS).build();
    }
}

diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java
index aff24f85..ab234a66 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java
@@ -1,5 +1,8 @@
package com.usatiuk.dhfs.objects.repository;

+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.IpPeerAddress;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerAddress;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -7,10 +10,10 @@ import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.eclipse.microprofile.config.inject.ConfigProperty;

+import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
-import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

@@ -22,17 +25,18 @@ public class RpcClientFactory {
    long syncTimeout;

    @Inject
-   PeerManager remoteHostManager;
+   PeerManager peerManager;
    @Inject
    RpcChannelFactory rpcChannelFactory;

+   // FIXME: Leaks!
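`getSecureChannel` above now builds a fresh channel per call; the channel cache and `dropCache` are gone, and the leak FIXME migrates to the stub cache that follows. A sketch of the channel construction itself, assuming a reasonably recent grpc-java where `TlsChannelCredentials` accepts in-memory key and trust managers:

```java
// Sketch only: the KeyManager/TrustManager are assumed to come from the
// surrounding services (self keypair + peer trust manager).
import io.grpc.ChannelCredentials;
import io.grpc.ManagedChannel;
import io.grpc.TlsChannelCredentials;
import io.grpc.netty.NettyChannelBuilder;

import javax.net.ssl.KeyManager;
import javax.net.ssl.TrustManager;
import java.util.concurrent.TimeUnit;

final class ChannelSketch {
    static ManagedChannel open(String peerId, String address, int port,
                               KeyManager keyManager, TrustManager trustManager) {
        ChannelCredentials creds = TlsChannelCredentials.newBuilder()
                .keyManager(keyManager)     // client certificate for mutual TLS
                .trustManager(trustManager) // trust anchored on known peer certs
                .build();
        return NettyChannelBuilder.forAddress(address, port, creds)
                .overrideAuthority(peerId)         // match cert name to peer id
                .idleTimeout(10, TimeUnit.SECONDS) // drop idle connections
                .build();
    }
}
```

Overriding the authority to the peer id is what lets certificate checks pass when peers are dialed by discovered IP addresses rather than DNS names.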
private ConcurrentMap _objSyncCache = new ConcurrentHashMap<>(); - public R withObjSyncClient(Collection targets, ObjectSyncClientFunction fn) { + public R withObjSyncClient(Collection targets, ObjectSyncClientFunction fn) { var shuffledList = new ArrayList<>(targets); Collections.shuffle(shuffledList); - for (UUID target : shuffledList) { + for (PeerId target : shuffledList) { try { return withObjSyncClient(target, fn); } catch (StatusRuntimeException e) { @@ -47,42 +51,44 @@ public class RpcClientFactory { throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("No reachable targets!")); } - public R withObjSyncClient(UUID target, ObjectSyncClientFunction fn) { - var hostinfo = remoteHostManager.getTransientState(target); - boolean reachable = remoteHostManager.isReachable(target); + public R withObjSyncClient(PeerId target, ObjectSyncClientFunction fn) { + var hostinfo = peerManager.getAddress(target); - if (hostinfo.getAddr() == null) - throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Address for " + target + " not yet known")); - - if (!reachable) + if (hostinfo == null) throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Not known to be reachable: " + target)); - return withObjSyncClient(target.toString(), hostinfo.getAddr(), hostinfo.getSecurePort(), syncTimeout, fn); + return withObjSyncClient(target, hostinfo, syncTimeout, fn); } - public R withObjSyncClient(String host, String addr, int port, long timeout, ObjectSyncClientFunction fn) { + public R withObjSyncClient(PeerId host, PeerAddress address, long timeout, ObjectSyncClientFunction fn) { + return switch (address) { + case IpPeerAddress ipPeerAddress -> + withObjSyncClient(host, ipPeerAddress.address(), ipPeerAddress.securePort(), timeout, fn); + default -> throw new IllegalStateException("Unexpected value: " + address); + }; + } + + public R withObjSyncClient(PeerId host, InetAddress addr, int port, long timeout, ObjectSyncClientFunction fn) { var key = new ObjSyncStubKey(host, addr, port); var stub = _objSyncCache.computeIfAbsent(key, (k) -> { - var channel = rpcChannelFactory.getSecureChannel(host, addr, port); + var channel = rpcChannelFactory.getSecureChannel(host, addr.getHostAddress(), port); return DhfsObjectSyncGrpcGrpc.newBlockingStub(channel) .withMaxOutboundMessageSize(Integer.MAX_VALUE) .withMaxInboundMessageSize(Integer.MAX_VALUE); - }); - return fn.apply(stub.withDeadlineAfter(timeout, TimeUnit.SECONDS)); + return fn.apply(host, stub.withDeadlineAfter(timeout, TimeUnit.SECONDS)); } public void dropCache() { - rpcChannelFactory.dropCache(); _objSyncCache = new ConcurrentHashMap<>(); } @FunctionalInterface public interface ObjectSyncClientFunction { - R apply(DhfsObjectSyncGrpcGrpc.DhfsObjectSyncGrpcBlockingStub client); + R apply(PeerId peer, DhfsObjectSyncGrpcGrpc.DhfsObjectSyncGrpcBlockingStub client); } - private record ObjSyncStubKey(String host, String address, int port) { + private record ObjSyncStubKey(PeerId id, InetAddress addr, int port) { } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java index cc88f97d..e1bdeff1 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java @@ -1,207 +1,84 @@ package com.usatiuk.dhfs.objects.repository; -import 
com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; +import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; -import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace; -import io.grpc.Status; +import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.inject.Instance; import jakarta.inject.Inject; +import org.pcollections.HashTreePMap; +import org.pcollections.PMap; -import java.util.HashMap; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; -import java.util.stream.Stream; +import javax.annotation.Nullable; @ApplicationScoped public class SyncHandler { @Inject - JObjectManager jObjectManager; - @Inject - PeerManager remoteHostManager; - @Inject - RemoteObjectServiceClient remoteObjectServiceClient; - @Inject - InvalidationQueueService invalidationQueueService; - @Inject - Instance conflictResolvers; + Transaction curTx; @Inject PersistentPeerDataService persistentPeerDataService; @Inject - ProtoSerializer dataProtoSerializer; + TransactionManager txm; @Inject - OpObjectRegistry opObjectRegistry; - @Inject - JObjectTxManager jObjectTxManager; + InvalidationQueueService invalidationQueueService; - public void pushInitialResyncObj(UUID host) { - Log.info("Doing initial object push for " + host); + public void handleRemoteUpdate(PeerId from, JObjectKey key, PMap receivedChangelog, @Nullable JDataRemote receivedData) { + var current = curTx.get(RemoteObjectMeta.class, key).orElse(null); + if (current == null) { + current = new RemoteObjectMeta(key, HashTreePMap.empty()); + curTx.put(current); + } - var objs = jObjectManager.findAll(); + var changelogCompare = SyncHelper.compareChangelogs(current.changelog(), receivedChangelog); - for (var obj : objs) { - Log.trace("IS: " + obj + " to " + host); - invalidationQueueService.pushInvalidationToOne(host, obj); + switch (changelogCompare) { + case EQUAL -> { + Log.debug("No action on update: " + key + " from " + from); + if (!current.hasLocalData() && receivedData != null) { + current = current.withHaveLocal(true); + curTx.put(current); + curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key())) + .map(w -> w.withData(receivedData)).orElse(new RemoteObjectDataWrapper<>(receivedData))); + } + } + case NEWER -> { + Log.debug("Received newer index update than known: " + key + " from " + from); + var newChangelog = receivedChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ? 
+                        receivedChangelog : receivedChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L);
+                current = current.withChangelog(newChangelog);
+
+                if (receivedData != null) {
+                    current = current.withHaveLocal(true);
+                    curTx.put(current);
+                    curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
+                            .map(w -> w.withData(receivedData)).orElse(new RemoteObjectDataWrapper<>(receivedData)));
+                } else {
+                    current = current.withHaveLocal(false);
+                    curTx.put(current);
+                }
+            }
+            case OLDER -> {
+                Log.debug("Received older index update than known: " + key + " from " + from);
+                return;
+            }
+            case CONFLICT -> {
+                Log.debug("Conflict on update (inconsistent version): " + key + " from " + from);
+                // TODO:
+                return;
+            }
+        }
+        var curKnownRemoteVersion = current.knownRemoteVersions().get(from);
+        var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();
+
+        if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
+            current = current.withKnownRemoteVersions(current.knownRemoteVersions().plus(from, receivedTotalVer));
+            curTx.put(current);
         }
     }

-    public void pushInitialResyncOp(UUID host) {
-        Log.info("Doing initial op push for " + host);
-
-        jObjectTxManager.executeTxAndFlush(
-                () -> {
-                    opObjectRegistry.pushBootstrapData(host);
-                }
-        );
-    }
-
-    public void handleOneUpdate(UUID from, ObjectHeader header) {
-        AtomicReference<JObject<JObjectData>> foundExt = new AtomicReference<>();
-
-        boolean conflict = jObjectTxManager.executeTx(() -> {
-            JObject<JObjectData> found = jObjectManager.getOrPut(header.getName(), JObjectData.class, Optional.empty());
-            foundExt.set(found);
-
-            var receivedTotalVer = header.getChangelog().getEntriesList()
-                    .stream().map(ObjectChangelogEntry::getVersion).reduce(0L, Long::sum);
-
-            var receivedMap = new HashMap<UUID, Long>();
-            for (var e : header.getChangelog().getEntriesList()) {
-                receivedMap.put(UUID.fromString(e.getHost()), e.getVersion());
-            }
-
-            return found.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, data, bump, invalidate) -> {
-                if (md.getRemoteCopies().getOrDefault(from, 0L) > receivedTotalVer) {
-                    Log.error("Received older index update than was known for host: "
-                            + from + " " + header.getName());
-                    throw new OutdatedUpdateException();
-                }
-
-                String rcv = "";
-                for (var e : header.getChangelog().getEntriesList()) {
-                    rcv += e.getHost() + ": " + e.getVersion() + "; ";
-                }
-                String ours = "";
-                for (var e : md.getChangelog().entrySet()) {
-                    ours += e.getKey() + ": " + e.getValue() + "; ";
-                }
-                Log.trace("Handling update: " + header.getName() + " from " + from + "\n" + "ours: " + ours + " \n" + "received: " + rcv);
-
-                boolean updatedRemoteVersion = false;
-
-                var oldRemoteVer = md.getRemoteCopies().put(from, receivedTotalVer);
-                if (oldRemoteVer == null || !oldRemoteVer.equals(receivedTotalVer)) updatedRemoteVersion = true;
-
-                boolean hasLower = false;
-                boolean hasHigher = false;
-                for (var e : Stream.concat(md.getChangelog().keySet().stream(), receivedMap.keySet().stream()).collect(Collectors.toSet())) {
-                    if (receivedMap.getOrDefault(e, 0L) < md.getChangelog().getOrDefault(e, 0L))
-                        hasLower = true;
-                    if (receivedMap.getOrDefault(e, 0L) > md.getChangelog().getOrDefault(e, 0L))
-                        hasHigher = true;
-                }
-
-                if (hasLower && hasHigher) {
-                    Log.info("Conflict on update (inconsistent version): " + header.getName() + " from " + from);
-                    return true;
-                }
-
-                if (hasLower) {
-                    Log.info("Received older index update than known: "
-                            + from + " " + header.getName());
-                    throw new OutdatedUpdateException();
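Aside: the new `handleRemoteUpdate` above tracks per-peer received state as `knownRemoteVersions`, keyed by the sum of the received changelog entries, replacing the old `remoteCopies` bookkeeping being deleted here. A minimal sketch of that bookkeeping, assuming plain `String` peer IDs and a mutable `HashMap` in place of the diff's `PeerId`/persistent `PMap` types (all names here are hypothetical):

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of the knownRemoteVersions bookkeeping at the tail of
// handleRemoteUpdate: a peer's "total version" is the sum of its changelog
// entries, and the recorded value only ever moves forward.
class KnownRemoteVersionsSketch {
    final Map<String, Long> knownRemoteVersions = new HashMap<>();

    void recordUpdate(String from, Map<String, Long> receivedChangelog) {
        long receivedTotal = receivedChangelog.values().stream()
                .mapToLong(Long::longValue).sum();
        // merge-with-max gives the same outcome as the diff's explicit
        // "null or less-than" check: never move the version backwards.
        knownRemoteVersions.merge(from, receivedTotal, Math::max);
    }
}
```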
-                }
-
-                if (hasHigher) {
-                    invalidate.apply();
-                    md.getChangelog().clear();
-                    md.getChangelog().putAll(receivedMap);
-                    md.getChangelog().putIfAbsent(persistentPeerDataService.getSelfUuid(), 0L);
-                    if (header.hasPushedData())
-                        found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData()));
-                    return false;
-                } else if (data == null && header.hasPushedData()) {
-                    found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-                    if (found.getData() == null)
-                        found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData()));
-                }
-
-                assert Objects.equals(receivedTotalVer, md.getOurVersion());
-
-                if (!updatedRemoteVersion)
-                    Log.debug("No action on update: " + header.getName() + " from " + from);
-
-                return false;
-            });
+    public void doInitialSync(PeerId peer) {
+        txm.run(() -> {
+            for (var cur : curTx.findAllObjects()) invalidationQueueService.pushInvalidationToOne(peer, cur, true);
         });
-
-        // TODO: Is the lock gap here ok?
-        if (conflict) {
-            Log.info("Trying conflict resolution: " + header.getName() + " from " + from);
-            var found = foundExt.get();
-
-            JObjectData theirsData;
-            ObjectHeader theirsHeader;
-            if (header.hasPushedData()) {
-                theirsHeader = header;
-                theirsData = dataProtoSerializer.deserialize(header.getPushedData());
-            } else {
-                var got = remoteObjectServiceClient.getSpecificObject(from, header.getName());
-                theirsData = dataProtoSerializer.deserialize(got.getRight());
-                theirsHeader = got.getLeft();
-            }
-
-            jObjectTxManager.executeTx(() -> {
-                var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
-                    if (d == null)
-                        throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName()));
-                    return d.getConflictResolver();
-                });
-                var resolver = conflictResolvers.select(resolverClass);
-                resolver.get().resolve(from, theirsHeader, theirsData, found);
-            });
-            Log.info("Resolved conflict for " + from + " " + header.getName());
-        }
-
-    }
-
-    public IndexUpdateReply handleRemoteUpdate(IndexUpdatePush request) {
-        // TODO: Dedup
-        try {
-            handleOneUpdate(UUID.fromString(request.getSelfUuid()), request.getHeader());
-        } catch (OutdatedUpdateException ignored) {
-            Log.warn("Outdated update of " + request.getHeader().getName() + " from " + request.getSelfUuid());
-            invalidationQueueService.pushInvalidationToOne(UUID.fromString(request.getSelfUuid()), request.getHeader().getName());
-        } catch (Exception ex) {
-            Log.info("Error when handling update from " + request.getSelfUuid() + " of " + request.getHeader().getName(), ex);
-            throw ex;
-        }
-
-        return IndexUpdateReply.getDefaultInstance();
-    }
-
-    protected static class OutdatedUpdateException extends RuntimeException {
-        OutdatedUpdateException() {
-            super();
-        }
-
-        OutdatedUpdateException(String message) {
-            super(message);
-        }
-
-        @Override
-        public synchronized Throwable fillInStackTrace() {
-            return this;
-        }
-    }
 }
\ No newline at end of file
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHelper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHelper.java
new file mode 100644
index 00000000..ceb391e2
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHelper.java
@@ -0,0 +1,42 @@
+package com.usatiuk.dhfs.objects.repository;
+
+import com.usatiuk.dhfs.objects.PeerId;
+import org.pcollections.PMap;
+
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class SyncHelper {
+    public enum ChangelogCmpResult {
+        EQUAL,
+        NEWER,
+        OLDER,
+        CONFLICT
+    }
+
+    public static ChangelogCmpResult compareChangelogs(PMap<PeerId, Long> current, PMap<PeerId, Long> other) {
+        boolean hasLower = false;
+        boolean hasHigher = false;
+        for (var e : Stream.concat(current.keySet().stream(), other.keySet().stream()).collect(Collectors.toUnmodifiableSet())) {
+            if (other.getOrDefault(e, 0L) < current.getOrDefault(e, 0L))
+                hasLower = true;
+            if (other.getOrDefault(e, 0L) > current.getOrDefault(e, 0L))
+                hasHigher = true;
+        }
+
+        if (hasLower && hasHigher)
+            return ChangelogCmpResult.CONFLICT;
+
+        if (hasLower)
+            return ChangelogCmpResult.OLDER;
+
+        if (hasHigher)
+            return ChangelogCmpResult.NEWER;
+
+        return ChangelogCmpResult.EQUAL;
+    }
+
+//    public static PMap<PeerId, Long> mergeChangelogs(PMap<PeerId, Long> current, PMap<PeerId, Long> other) {
+//        return current.plusAll(other);
+//    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TemporaryReceivedObjectSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TemporaryReceivedObjectSerializer.java
new file mode 100644
index 00000000..741026f9
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TemporaryReceivedObjectSerializer.java
@@ -0,0 +1,43 @@
+package com.usatiuk.dhfs.objects.repository;
+
+import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
+import com.usatiuk.dhfs.objects.JDataRemote;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.ReceivedObject;
+import com.usatiuk.dhfs.objects.persistence.JDataRemoteP;
+import com.usatiuk.dhfs.objects.persistence.JObjectKeyP;
+import com.usatiuk.dhfs.objects.persistence.PeerIdP;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+import jakarta.inject.Singleton;
+import org.pcollections.HashTreePMap;
+import org.pcollections.PMap;
+
+@Singleton
+public class TemporaryReceivedObjectSerializer implements ProtoSerializer<GetObjectReply, ReceivedObject> {
+    @Inject
+    ProtoSerializer<JDataRemoteP, JDataRemote> remoteObjectSerializer;
+
+    @Override
+    public ReceivedObject deserialize(GetObjectReply message) {
+        PMap<PeerId, Long> changelog = HashTreePMap.empty();
+        for (var entry : message.getChangelog().getEntriesList()) {
+            changelog = changelog.plus(PeerId.of(entry.getKey().getId().getName()), entry.getValue());
+        }
+        var data = remoteObjectSerializer.deserialize(message.getPushedData());
+        return new ReceivedObject(changelog, data);
+    }
+
+    @Override
+    public GetObjectReply serialize(ReceivedObject object) {
+        var builder = GetObjectReply.newBuilder();
+        var changelogBuilder = builder.getChangelogBuilder();
+        object.changelog().forEach((peer, version) -> {
+            changelogBuilder.addEntriesBuilder()
+                    .setKey(PeerIdP.newBuilder().setId(JObjectKeyP.newBuilder().setName(peer.id().toString()).build()).build())
+                    .setValue(version);
+        });
+        builder.setPushedData(remoteObjectSerializer.serialize(object.data()));
+        return builder.build();
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java
index 63f3e7a1..83a5144b 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java
@@ -1,6 +1,7 @@
 package com.usatiuk.dhfs.objects.repository.invalidation;

-import lombok.Getter;
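`SyncHelper.compareChangelogs` above factors out the version-vector comparison that the old `SyncHandler.handleOneUpdate` did inline with `hasLower`/`hasHigher` flags: entries missing on either side count as version 0. A self-contained sketch of the same semantics over plain `Map<String, Long>` stand-ins (hypothetical names, no pcollections dependency):

```java
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ChangelogCompareDemo {
    enum Result {EQUAL, NEWER, OLDER, CONFLICT}

    // Element-wise comparison mirroring SyncHelper.compareChangelogs;
    // a missing entry is treated as version 0.
    static Result compare(Map<String, Long> ours, Map<String, Long> theirs) {
        boolean hasLower = false, hasHigher = false;
        Set<String> keys = new HashSet<>(ours.keySet());
        keys.addAll(theirs.keySet());
        for (String peer : keys) {
            long o = ours.getOrDefault(peer, 0L), t = theirs.getOrDefault(peer, 0L);
            if (t < o) hasLower = true;
            if (t > o) hasHigher = true;
        }
        if (hasLower && hasHigher) return Result.CONFLICT;
        if (hasLower) return Result.OLDER;
        if (hasHigher) return Result.NEWER;
        return Result.EQUAL;
    }

    public static void main(String[] args) {
        // Theirs strictly ahead on A, equal on B -> NEWER.
        System.out.println(compare(Map.of("A", 2L, "B", 1L), Map.of("A", 3L, "B", 1L)));
        // Each side has a peer entry the other lacks -> CONFLICT.
        System.out.println(compare(Map.of("A", 2L), Map.of("B", 2L)));
    }
}
```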
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
 import org.apache.commons.collections4.MultiValuedMap;
 import org.apache.commons.collections4.multimap.HashSetValuedHashMap;
@@ -12,6 +13,5 @@ public class DeferredInvalidationQueueData implements Serializable {
     @Serial
     private static final long serialVersionUID = 1L;

-    @Getter
-    private final MultiValuedMap<UUID, String> _deferredInvalidations = new HashSetValuedHashMap<>();
+    public final MultiValuedMap<PeerId, InvalidationQueueEntry> deferredInvalidations = new HashSetValuedHashMap<>();
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java
index 575f65dc..d8e68d98 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java
@@ -1,7 +1,8 @@
 package com.usatiuk.dhfs.objects.repository.invalidation;

-import com.usatiuk.dhfs.SerializationHelper;
+import com.usatiuk.dhfs.objects.PeerId;
 import com.usatiuk.dhfs.objects.repository.PeerManager;
+import com.usatiuk.dhfs.utils.SerializationHelper;
 import io.quarkus.logging.Log;
 import io.quarkus.runtime.ShutdownEvent;
 import io.quarkus.runtime.StartupEvent;
@@ -17,9 +18,6 @@
 import org.eclipse.microprofile.config.inject.ConfigProperty;

 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
-import java.util.UUID;
-
-import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;

 @ApplicationScoped
 public class DeferredInvalidationQueueService {
@@ -28,9 +26,8 @@ public class DeferredInvalidationQueueService {
     PeerManager remoteHostManager;
     @Inject
     InvalidationQueueService invalidationQueueService;
-    @ConfigProperty(name = "dhfs.objects.root")
+    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
     String dataRoot;
-    // FIXME: DB when?
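`DeferredInvalidationQueueService` parks invalidations addressed to unreachable peers and replays them once the peer is seen again (`defer`/`returnForHost` below). A minimal sketch of that park-and-replay pattern, assuming simplified hypothetical types in place of the `MultiValuedMap`/`InvalidationQueueEntry` used in the diff:

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class DeferredQueueSketch {
    record Entry(String peer, String key, boolean forced) {}

    private final Map<String, Set<Entry>> deferred = new HashMap<>();
    private final Deque<Entry> liveQueue = new ArrayDeque<>();

    // Park an entry for a currently unreachable peer.
    synchronized void defer(Entry e) {
        deferred.computeIfAbsent(e.peer(), p -> new HashSet<>()).add(e);
    }

    // On reconnect: move everything parked for this peer back onto the
    // live send queue, like returnForHost below.
    synchronized void returnForHost(String peer) {
        var col = deferred.remove(peer);
        if (col != null) liveQueue.addAll(col);
    }
}
```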
     private DeferredInvalidationQueueData _persistentData = new DeferredInvalidationQueueData();

     void init(@Observes @Priority(290) StartupEvent event) throws IOException {
@@ -39,11 +36,8 @@ public class DeferredInvalidationQueueService {
         if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) {
             Log.info("Reading invalidation queue");
             _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName)));
-        } else if (Paths.get(dataRoot).resolve(dataFileName + ".bak").toFile().exists()) {
-            Log.warn("Reading invalidation queue from backup");
-            _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName)));
         }
-        remoteHostManager.registerConnectEventListener(this::returnForHost);
+//        remoteHostManager.registerConnectEventListener(this::returnForHost);
     }

     void shutdown(@Observes @Priority(300) ShutdownEvent event) throws IOException {
@@ -52,11 +46,8 @@ public class DeferredInvalidationQueueService {
         Log.info("Saved deferred invalidations");
     }

-
     private void writeData() {
         try {
-            if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists())
-                Files.move(Paths.get(dataRoot).resolve(dataFileName), Paths.get(dataRoot).resolve(dataFileName + ".bak"), REPLACE_EXISTING);
             Files.write(Paths.get(dataRoot).resolve(dataFileName), SerializationUtils.serialize(_persistentData));
         } catch (IOException iex) {
             Log.error("Error writing deferred invalidations data", iex);
@@ -72,21 +63,21 @@ public class DeferredInvalidationQueueService {
         returnForHost(reachable);
     }

-    void returnForHost(UUID host) {
+    void returnForHost(PeerId host) {
         synchronized (this) {
-            var col = _persistentData.getDeferredInvalidations().get(host);
+            var col = _persistentData.deferredInvalidations.get(host);
             for (var s : col) {
-                Log.trace("Un-deferred invalidation to " + host + " of " + s);
-                invalidationQueueService.pushDeferredInvalidations(host, s);
+                Log.tracev("Returning deferred invalidation: {0}", s);
+                invalidationQueueService.pushDeferredInvalidations(s);
             }
             col.clear();
         }
     }

-    void defer(UUID host, String object) {
+    void defer(InvalidationQueueEntry entry) {
         synchronized (this) {
-            Log.trace("Deferred invalidation to " + host + " of " + object);
-            _persistentData.getDeferredInvalidations().put(host, object);
+            Log.tracev("Deferred invalidation: {0}", entry);
+            _persistentData.deferredInvalidations.put(entry.peer(), entry);
         }
     }
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java
new file mode 100644
index 00000000..38e1aef3
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java
@@ -0,0 +1,16 @@
+package com.usatiuk.dhfs.objects.repository.invalidation;
+
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.RemoteObjectMeta;
+import org.pcollections.PMap;
+
+import java.util.Collection;
+import java.util.List;
+
+public record IndexUpdateOp(JObjectKey key, PMap<PeerId, Long> changelog) implements Op {
+    @Override
+    public Collection<JObjectKey> getEscapedRefs() {
+        return List.of(key);
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java
new file mode 100644
index 00000000..04ea3853
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java
@@ -0,0 +1,9 @@
+package com.usatiuk.dhfs.objects.repository.invalidation;
+
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
+
+import java.io.Serializable;
+
+public record InvalidationQueueEntry(PeerId peer, JObjectKey key, boolean forced) implements Serializable {
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java
index f754457c..cb31e2b0 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java
@@ -1,12 +1,10 @@
 package com.usatiuk.dhfs.objects.repository.invalidation;

-import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException;
-import com.usatiuk.dhfs.objects.jrepository.JObject;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
 import com.usatiuk.dhfs.objects.repository.PeerManager;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
-import com.usatiuk.utils.HashSetDelayedBlockingQueue;
+import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService;
+import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue;
 import io.quarkus.logging.Log;
 import io.quarkus.runtime.ShutdownEvent;
 import io.quarkus.runtime.StartupEvent;
@@ -16,10 +14,8 @@
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.enterprise.event.Observes;
 import jakarta.inject.Inject;
 import org.apache.commons.lang3.concurrent.BasicThreadFactory;
-import org.apache.commons.lang3.tuple.Pair;
 import org.eclipse.microprofile.config.inject.ConfigProperty;

-import java.util.UUID;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -27,18 +23,16 @@

 @ApplicationScoped
 public class InvalidationQueueService {
-    private final HashSetDelayedBlockingQueue<Pair<UUID, String>> _queue;
-    private final AtomicReference<ConcurrentHashSet<String>> _toAllQueue = new AtomicReference<>(new ConcurrentHashSet<>());
+    private final HashSetDelayedBlockingQueue<InvalidationQueueEntry> _queue;
+    private final AtomicReference<ConcurrentHashSet<JObjectKey>> _toAllQueue = new AtomicReference<>(new ConcurrentHashSet<>());
     @Inject
     PeerManager remoteHostManager;
     @Inject
-    RemoteObjectServiceClient remoteObjectServiceClient;
-    @Inject
-    JObjectManager jObjectManager;
-    @Inject
-    PersistentPeerDataService persistentPeerDataService;
-    @Inject
     DeferredInvalidationQueueService deferredInvalidationQueueService;
+    @Inject
+    PeerInfoService peerInfoService;
+    @Inject
+    OpPusher opPusher;
     @ConfigProperty(name = "dhfs.objects.invalidation.threads")
     int threads;
     private ExecutorService _executor;
@@ -69,7 +63,7 @@ public class InvalidationQueueService {
         var data = _queue.close();
         Log.info("Will defer " + data.size() + " invalidations on shutdown");
         for (var e : data)
-            deferredInvalidationQueueService.defer(e.getLeft(), e.getRight());
+            deferredInvalidationQueueService.defer(e);
     }

     private void sender() {
@@ -77,7 +71,7 @@ public class InvalidationQueueService {
         try {
             try {
                 if (!_queue.hasImmediate()) {
-                    ConcurrentHashSet<String> toAllQueue;
+                    ConcurrentHashSet<JObjectKey> toAllQueue;

                     while (true) {
                         toAllQueue = _toAllQueue.get();
@@ -93,9 +87,9 @@ public class InvalidationQueueService {
                         var hostInfo = remoteHostManager.getHostStateSnapshot();
                         for (var o : toAllQueue) {
                             for (var h : hostInfo.available())
-                                _queue.add(Pair.of(h, o));
+                                _queue.add(new InvalidationQueueEntry(h, o, false));
                             for (var u : hostInfo.unavailable())
-                                deferredInvalidationQueueService.defer(u, o);
+                                deferredInvalidationQueueService.defer(new InvalidationQueueEntry(u, o, false));
                         }
                     }
                 }
@@ -106,22 +100,19 @@ public class InvalidationQueueService {
                 long success = 0;

                 for (var e : data) {
-                    if (!persistentPeerDataService.existsHost(e.getLeft())) continue;
+                    if (peerInfoService.getPeerInfo(e.peer()).isEmpty()) continue;

-                    if (!remoteHostManager.isReachable(e.getLeft())) {
-                        deferredInvalidationQueueService.defer(e.getLeft(), e.getRight());
+                    if (!remoteHostManager.isReachable(e.peer())) {
+                        deferredInvalidationQueueService.defer(e);
                         continue;
                     }

                     try {
-                        jObjectManager.get(e.getRight()).ifPresent(obj -> {
-                            remoteObjectServiceClient.notifyUpdate(obj, e.getLeft());
-                        });
+                        opPusher.doPush(e);
                         success++;
-                    } catch (DeletedObjectAccessException ignored) {
                     } catch (Exception ex) {
-                        Log.info("Failed to send invalidation to " + e.getLeft() + ", will retry", ex);
-                        pushInvalidationToOne(e.getLeft(), e.getRight());
+                        Log.warnv("Failed to send invalidation to {0}, will retry: {1}", e, ex);
+                        pushInvalidationToOne(e);
                     }
                     if (_shutdown) {
                         Log.info("Invalidation sender exiting");
@@ -142,39 +133,38 @@ public class InvalidationQueueService {
         Log.info("Invalidation sender exiting");
     }

-    public void pushInvalidationToAll(JObject<?> obj) {
-        if (obj.getMeta().isOnlyLocal()) return;
+    public void pushInvalidationToAll(JObjectKey key) {
         while (true) {
             var queue = _toAllQueue.get();
             if (queue == null) {
-                var nq = new ConcurrentHashSet<String>();
+                var nq = new ConcurrentHashSet<JObjectKey>();
                 if (!_toAllQueue.compareAndSet(null, nq)) continue;
                 queue = nq;
             }
-            queue.add(obj.getMeta().getName());
+            queue.add(key);
             if (_toAllQueue.get() == queue) break;
         }
     }

-    public void pushInvalidationToOne(UUID host, JObject<?> obj) {
-        if (obj.getMeta().isOnlyLocal()) return;
-        if (remoteHostManager.isReachable(host))
-            _queue.add(Pair.of(host, obj.getMeta().getName()));
+    void pushInvalidationToOne(InvalidationQueueEntry entry) {
+        if (remoteHostManager.isReachable(entry.peer()))
+            _queue.add(entry);
         else
-            deferredInvalidationQueueService.defer(host, obj.getMeta().getName());
+            deferredInvalidationQueueService.defer(entry);
     }

-    public void pushInvalidationToAll(String name) {
-        pushInvalidationToAll(jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found")));
+    public void pushInvalidationToOne(PeerId host, JObjectKey obj, boolean forced) {
+        var entry = new InvalidationQueueEntry(host, obj, forced);
+        pushInvalidationToOne(entry);
     }

-    public void pushInvalidationToOne(UUID host, String name) {
-        pushInvalidationToOne(host, jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found")));
+    public void pushInvalidationToOne(PeerId host, JObjectKey obj) {
+        pushInvalidationToOne(host, obj, false);
     }

-    protected void pushDeferredInvalidations(UUID host, String name) {
-        _queue.add(Pair.of(host, name));
+    void pushDeferredInvalidations(InvalidationQueueEntry entry) {
+        _queue.add(entry);
     }
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java
new file mode 100644
index 00000000..143bfcf3
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java
@@ -0,0 +1,11 @@
+package com.usatiuk.dhfs.objects.repository.invalidation;
+
+import com.usatiuk.autoprotomap.runtime.ProtoMirror;
+import com.usatiuk.dhfs.objects.JObjectKey;
+
+import java.io.Serializable;
+import java.util.Collection;
+
+public interface Op extends Serializable {
+    Collection<JObjectKey> getEscapedRefs();
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpHandler.java
new file mode 100644
index 00000000..6b78661a
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpHandler.java
@@ -0,0 +1,27 @@
+package com.usatiuk.dhfs.objects.repository.invalidation;
+
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
+import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+@ApplicationScoped
+public class OpHandler {
+    @Inject
+    PushOpHandler pushOpHandler;
+    @Inject
+    Transaction curTx;
+    @Inject
+    JKleppmannTreeManager jKleppmannTreeManager;
+
+    public void handleOp(PeerId from, Op op) {
+        if (op instanceof IndexUpdateOp iu) {
+            pushOpHandler.handlePush(from, iu);
+        } else if (op instanceof JKleppmannTreeOpWrapper jk) {
+            var tree = jKleppmannTreeManager.getTree(jk.treeName());
+            tree.acceptExternalOp(from, jk);
+        }
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java
new file mode 100644
index 00000000..d1b39846
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java
@@ -0,0 +1,79 @@
+package com.usatiuk.dhfs.objects.repository.invalidation;
+
+import com.usatiuk.dhfs.objects.JData;
+import com.usatiuk.dhfs.objects.RemoteObjectMeta;
+import com.usatiuk.dhfs.objects.RemoteTransaction;
+import com.usatiuk.dhfs.objects.TransactionManager;
+import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
+import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData;
+import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+import java.util.List;
+
+@ApplicationScoped
+public class OpPusher {
+    @Inject
+    Transaction curTx;
+    @Inject
+    TransactionManager txm;
+    @Inject
+    RemoteTransaction remoteTransaction;
+    @Inject
+    RemoteObjectServiceClient remoteObjectServiceClient;
+    @Inject
+    InvalidationQueueService invalidationQueueService;
+    @Inject
+    JKleppmannTreeManager jKleppmannTreeManager;
+
+    public void doPush(InvalidationQueueEntry entry) {
+        List<Op> info = txm.run(() -> {
+            var obj = curTx.get(JData.class, entry.key()).orElse(null);
+            switch (obj) {
+                case RemoteObjectMeta remote -> {
+                    return List.of(new IndexUpdateOp(entry.key(), remote.changelog()));
+                }
+                case JKleppmannTreePersistentData pd -> {
+                    var tree = jKleppmannTreeManager.getTree(pd.key());
+                    if (entry.forced())
+                        tree.recordBootstrap(entry.peer());
+
+                    if (!tree.hasPendingOpsForHost(entry.peer()))
+                        return null;
+
+                    var ops = tree.getPendingOpsForHost(entry.peer(), 1);
+
+                    if (tree.hasPendingOpsForHost(entry.peer()))
+                        invalidationQueueService.pushInvalidationToOne(entry.peer(), pd.key());
+
+                    return ops;
+                }
+                case null,
+                     default -> {
+                    return null;
+                }
+            }
+        });
+        if (info == null) {
+            return;
+        }
+        remoteObjectServiceClient.pushOps(entry.peer(), info);
+        txm.run(() -> {
+            var obj = curTx.get(JData.class, entry.key()).orElse(null);
+            switch (obj) {
+                case JKleppmannTreePersistentData pd: {
+                    var tree = jKleppmannTreeManager.getTree(pd.key());
+                    for (var op : info) {
+                        tree.commitOpForHost(entry.peer(), op);
+                    }
+                    break;
+                }
+                case null:
+                default:
+            }
+        });
+
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java
new file mode 100644
index 00000000..b7c4c48d
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java
@@ -0,0 +1,22 @@
+package com.usatiuk.dhfs.objects.repository.invalidation;
+
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.RemoteTransaction;
+import com.usatiuk.dhfs.objects.repository.SyncHandler;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+@ApplicationScoped
+public class PushOpHandler {
+    @Inject
+    Transaction curTx;
+    @Inject
+    SyncHandler syncHandler;
+    @Inject
+    RemoteTransaction remoteTransaction;
+
+    public void handlePush(PeerId peer, IndexUpdateOp obj) {
+        syncHandler.handleRemoteUpdate(peer, obj.key(), obj.changelog(), null);
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/IpPeerAddress.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/IpPeerAddress.java
new file mode 100644
index 00000000..0d6ab1da
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/IpPeerAddress.java
@@ -0,0 +1,9 @@
+package com.usatiuk.dhfs.objects.repository.peerdiscovery;
+
+import com.usatiuk.dhfs.objects.PeerId;
+
+import java.net.InetAddress;
+
+public record IpPeerAddress(PeerId peer, PeerAddressType type,
+                            InetAddress address, int port, int securePort) implements PeerAddress {
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddress.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddress.java
new file mode 100644
index 00000000..81824de5
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddress.java
@@ -0,0 +1,8 @@
+package com.usatiuk.dhfs.objects.repository.peerdiscovery;
+
+import com.usatiuk.dhfs.objects.PeerId;
+
+public interface PeerAddress {
+    PeerId peer();
+
+    PeerAddressType type();
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddressType.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddressType.java
new file mode 100644
index 00000000..b5027e4b
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddressType.java
@@ -0,0 +1,7 @@
+package com.usatiuk.dhfs.objects.repository.peerdiscovery;
+
+public enum PeerAddressType {
+    LAN,
+    WAN,
+    PROXY
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java
new file mode 100644
index 00000000..1021add5
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java
@@ -0,0 +1,72 @@
+package com.usatiuk.dhfs.objects.repository.peerdiscovery;
+
+import com.usatiuk.dhfs.objects.PeerId;
+import io.quarkus.logging.Log;
+import jakarta.enterprise.context.ApplicationScoped;
+import org.apache.commons.collections4.MultiValuedMap;
+import org.apache.commons.collections4.multimap.HashSetValuedHashMap;
+import org.eclipse.microprofile.config.inject.ConfigProperty;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+@ApplicationScoped
+public class PeerDiscoveryDirectory {
+    @ConfigProperty(name = "dhfs.peerdiscovery.timeout")
+    long timeout;
+
+    private record PeerEntry(PeerAddress addr, long lastSeen) {
+        public PeerEntry withLastSeen(long lastSeen) {
+            return new PeerEntry(addr, lastSeen);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (o == null || getClass() != o.getClass()) return false;
+            PeerEntry peerEntry = (PeerEntry) o;
+            return Objects.equals(addr, peerEntry.addr);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hashCode(addr);
+        }
+    }
+
+    private final MultiValuedMap<PeerId, PeerEntry> _entries = new HashSetValuedHashMap<>();
+
+    public void notifyAddr(PeerAddress addr) {
+        Log.tracev("New address {0}", addr);
+        synchronized (_entries) {
+            var peer = addr.peer();
+            _entries.removeMapping(peer, new PeerEntry(addr, 0));
+            _entries.put(peer, new PeerEntry(addr, System.currentTimeMillis()));
+        }
+    }
+
+    public Collection<PeerAddress> getForPeer(PeerId peer) {
+        synchronized (_entries) {
+            long curTime = System.currentTimeMillis();
+            var partitioned = _entries.asMap().get(peer).stream()
+                    .collect(Collectors.partitioningBy(e -> e.lastSeen() + timeout < curTime));
+            for (var entry : partitioned.get(true)) {
+                _entries.removeMapping(peer, entry);
+            }
+            return partitioned.get(false).stream().map(PeerEntry::addr).toList();
+        }
+    }
+
+    public Collection<PeerId> getReachablePeers() {
+        synchronized (_entries) {
+            long curTime = System.currentTimeMillis();
+            var partitioned = _entries.entries().stream()
+                    .collect(Collectors.partitioningBy(e -> e.getValue().lastSeen() + timeout < curTime));
+            for (var entry : partitioned.get(true)) {
+                _entries.removeMapping(entry.getKey(), entry.getValue());
+            }
+            return partitioned.get(false).stream().map(Map.Entry::getKey).collect(Collectors.toUnmodifiableSet());
+        }
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/StaticPeerDiscovery.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/StaticPeerDiscovery.java
new file mode 100644
index 00000000..f201c132
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/StaticPeerDiscovery.java
@@ -0,0 +1,46 @@
+package com.usatiuk.dhfs.objects.repository.peerdiscovery;
+
+import com.usatiuk.dhfs.objects.PeerId;
+import io.quarkus.scheduler.Scheduled;
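`PeerDiscoveryDirectory` above keeps a last-seen timestamp per announced address and drops entries that have not been re-announced within `dhfs.peerdiscovery.timeout`. A compact sketch of the same expire-on-read idea, assuming hypothetical `String` addresses instead of `PeerAddress`:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

class DiscoveryExpirySketch {
    record Seen(String addr, long lastSeen) {}

    private final List<Seen> entries = new ArrayList<>();
    private final long timeoutMs = 10_000; // stand-in for the config property

    synchronized void notifyAddr(String addr) {
        entries.removeIf(e -> e.addr().equals(addr)); // refresh the timestamp
        entries.add(new Seen(addr, System.currentTimeMillis()));
    }

    synchronized List<String> alive() {
        long now = System.currentTimeMillis();
        // Partition into expired and fresh, like the partitioningBy above.
        var parts = entries.stream()
                .collect(Collectors.partitioningBy(e -> e.lastSeen() + timeoutMs < now));
        entries.removeAll(parts.get(true)); // forget expired addresses
        return parts.get(false).stream().map(Seen::addr).toList();
    }
}
```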
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+import org.eclipse.microprofile.config.inject.ConfigProperty;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Stream;
+
+@ApplicationScoped
+public class StaticPeerDiscovery {
+    private final List<IpPeerAddress> _peers;
+
+    public StaticPeerDiscovery(@ConfigProperty(name = "dhfs.peerdiscovery.static-peers") Optional<String> staticPeers) {
+        var peers = staticPeers.orElse("");
+        _peers = Arrays.stream(peers.split(",")).flatMap(e ->
+        {
+            if (e.isEmpty()) {
+                return Stream.of();
+            }
+            var split = e.split(":");
+            try {
+                return Stream.of(new IpPeerAddress(PeerId.of(split[0]), PeerAddressType.LAN, InetAddress.getByName(split[1]),
+                        Integer.parseInt(split[2]), Integer.parseInt(split[3])));
+            } catch (UnknownHostException ex) {
+                throw new RuntimeException(ex);
+            }
+        }).toList();
+    }
+
+    @Inject
+    PeerDiscoveryDirectory peerDiscoveryDirectory;
+
+    @Scheduled(every = "1s", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
+    public void discoverPeers() {
+        for (var peer : _peers) {
+            peerDiscoveryDirectory.notifyAddr(peer);
+        }
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java
new file mode 100644
index 00000000..7b8362f5
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java
@@ -0,0 +1,104 @@
+package com.usatiuk.dhfs.objects.repository.peerdiscovery.local;
+
+import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerDiscoveryInfo;
+import io.quarkus.arc.properties.IfBuildProperty;
+import io.quarkus.logging.Log;
+import io.quarkus.runtime.ShutdownEvent;
+import io.quarkus.runtime.Startup;
+import io.quarkus.scheduler.Scheduled;
+import jakarta.annotation.Priority;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.enterprise.event.Observes;
+import jakarta.inject.Inject;
+import org.eclipse.microprofile.config.inject.ConfigProperty;
+
+import java.net.*;
+
+@ApplicationScoped
+@IfBuildProperty(name = "dhfs.local-discovery", stringValue = "true")
+public class LocalPeerDiscoveryBroadcaster {
+    @Inject
+    PersistentPeerDataService persistentPeerDataService;
+
+    @ConfigProperty(name = "quarkus.http.port")
+    int ourPort;
+
+    @ConfigProperty(name = "quarkus.http.ssl-port")
+    int ourSecurePort;
+
+    @ConfigProperty(name = "dhfs.objects.peerdiscovery.port")
+    int broadcastPort;
+
+    @ConfigProperty(name = "dhfs.objects.peerdiscovery.broadcast")
+    boolean enabled;
+
+    private DatagramSocket _socket;
+
+    @Startup
+    void init() throws SocketException {
+        if (!enabled) {
+            return;
+        }
+        _socket = new DatagramSocket();
+        _socket.setBroadcast(true);
+    }
+
+    void shutdown(@Observes @Priority(10) ShutdownEvent event) {
+        if (!enabled) {
+            return;
+        }
+        _socket.close();
+    }
+
+    @Scheduled(every = "${dhfs.objects.peerdiscovery.interval}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
+    public void broadcast() throws Exception {
+        if (!enabled) {
+            return;
+        }
+        var sendData = PeerDiscoveryInfo.newBuilder()
+                .setUuid(persistentPeerDataService.getSelfUuid().toString())
+                .setPort(ourPort)
+                .setSecurePort(ourSecurePort)
+                .build();
+
+        var sendBytes = sendData.toByteArray();
+
+        DatagramPacket sendPacket
+                = new DatagramPacket(sendBytes, sendBytes.length,
+                InetAddress.getByName("255.255.255.255"), broadcastPort);
+
+        _socket.send(sendPacket);
+
+        var interfaces = NetworkInterface.getNetworkInterfaces();
+        while (interfaces.hasMoreElements()) {
+            NetworkInterface networkInterface = interfaces.nextElement();
+
+            try {
+                if (networkInterface.isLoopback() || !networkInterface.isUp()) {
+                    continue;
+                }
+            } catch (Exception e) {
+                continue;
+            }
+
+            for (InterfaceAddress interfaceAddress : networkInterface.getInterfaceAddresses()) {
+                InetAddress broadcast = interfaceAddress.getBroadcast();
+                if (broadcast == null) {
+                    continue;
+                }
+
+                try {
+                    sendPacket = new DatagramPacket(sendBytes, sendBytes.length, broadcast, broadcastPort);
+                    _socket.send(sendPacket);
+                    Log.tracev("Broadcast sent to: {0}, at: {1}", broadcast.getHostAddress(), networkInterface.getDisplayName());
+                } catch (Exception ignored) {
+                    continue;
+                }
+
+//                Log.trace(getClass().getName() + "Broadcast sent to: " + broadcast.getHostAddress()
+//                        + ", at: " + networkInterface.getDisplayName());
+            }
+        }
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java
new file mode 100644
index 00000000..ce412b6b
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java
@@ -0,0 +1,91 @@
+package com.usatiuk.dhfs.objects.repository.peerdiscovery.local;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.IpPeerAddress;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerAddressType;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerDiscoveryDirectory;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerDiscoveryInfo;
+import io.quarkus.arc.properties.IfBuildProperty;
+import io.quarkus.logging.Log;
+import io.quarkus.runtime.ShutdownEvent;
+import io.quarkus.runtime.Startup;
+import jakarta.annotation.Priority;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.enterprise.event.Observes;
+import jakarta.inject.Inject;
+import org.eclipse.microprofile.config.inject.ConfigProperty;
+
+import java.net.*;
+import java.nio.ByteBuffer;
+
+@ApplicationScoped
+@IfBuildProperty(name = "dhfs.local-discovery", stringValue = "true")
+public class LocalPeerDiscoveryClient {
+    @Inject
+    PeerDiscoveryDirectory peerDiscoveryDirectory;
+
+    private Thread _clientThread;
+
+    private DatagramSocket _socket;
+
+    @ConfigProperty(name = "dhfs.objects.peerdiscovery.broadcast")
+    boolean enabled;
+
+    @Startup
+    void init() throws SocketException, UnknownHostException {
+        if (!enabled) {
+            return;
+        }
+        _socket = new DatagramSocket(42069, InetAddress.getByName("0.0.0.0"));
+        _socket.setBroadcast(true);
+
+        _clientThread = new Thread(this::client);
+        _clientThread.setName("LocalPeerDiscoveryClient");
+        _clientThread.start();
+    }
+
+    void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException {
+        if (!enabled) {
+            return;
+        }
+        _socket.close();
+        _clientThread.interrupt();
+        _clientThread.interrupt();
+        while (_clientThread.isAlive()) {
+            try {
+                _clientThread.join();
+            } catch (InterruptedException ignored) {
+            }
+        }
+    }
+
+    private void client() {
+        while (!Thread.interrupted() && !_socket.isClosed()) {
+            try {
+                byte[] buf = new byte[10000];
+                DatagramPacket packet = new DatagramPacket(buf, buf.length);
+                _socket.receive(packet);
+
+                try {
+                    var got = PeerDiscoveryInfo.parseFrom(ByteBuffer.wrap(buf, 0, packet.getLength()));
+                    Log.tracev("Got peer discovery packet from {0}", packet.getAddress());
+                    peerDiscoveryDirectory.notifyAddr(
+                            new IpPeerAddress(
+                                    PeerId.of(got.getUuid()),
+                                    PeerAddressType.LAN,
+                                    packet.getAddress(),
+                                    got.getPort(),
+                                    got.getSecurePort()
+                            )
+                    );
+                } catch (InvalidProtocolBufferException e) {
+                    continue;
+                }
+            } catch (Exception ex) {
+                Log.error(ex);
+            }
+        }
+        Log.info("PeerDiscoveryClient stopped");
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java
index e51e4d02..7b2d8a59 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java
@@ -1,4 +1,27 @@
 package com.usatiuk.dhfs.objects.repository.peersync;

-public record PeerInfo(String selfUuid, String cert) {
+import com.google.protobuf.ByteString;
+import com.usatiuk.dhfs.objects.JDataRemote;
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.repository.CertificateTools;
+
+import java.security.cert.X509Certificate;
+
+public record PeerInfo(JObjectKey key, PeerId id, ByteString cert) implements JDataRemote {
+    public PeerInfo(PeerId id, byte[] cert) {
+        this(id.toJObjectKey(), id, ByteString.copyFrom(cert));
+    }
+
+    public X509Certificate parsedCert() {
+        return CertificateTools.certFromBytes(cert.toByteArray());
+    }
+
+    @Override
+    public String toString() {
+        return "PeerInfo{" +
+                "key=" + key +
+                ", id=" + id +
+                '}';
+    }
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java
new file mode 100644
index 00000000..783d391a
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java
@@ -0,0 +1,95 @@
+package com.usatiuk.dhfs.objects.repository.peersync;
+
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.RemoteTransaction;
+import com.usatiuk.dhfs.objects.TransactionManager;
+import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
+import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
+import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
+import com.usatiuk.dhfs.objects.repository.peersync.structs.JKleppmannTreeNodeMetaPeer;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
+import io.quarkus.logging.Log;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+import java.util.List;
+import java.util.Optional;
+
+@ApplicationScoped
+public class PeerInfoService {
+    @Inject
+    Transaction curTx;
+    @Inject
+    TransactionManager jObjectTxManager;
+    @Inject
+    JKleppmannTreeManager jKleppmannTreeManager;
+    @Inject
+    PersistentPeerDataService persistentPeerDataService;
+    @Inject
+    RemoteTransaction remoteTx;
+
+    private JKleppmannTreeManager.JKleppmannTree getTree() {
+        return jKleppmannTreeManager.getTree(JObjectKey.of("peers"));
+    }
+
+    public Optional<PeerInfo> getPeerInfo(PeerId peer) {
+        return jObjectTxManager.run(() -> {
+            var gotKey = getTree().traverse(List.of(peer.toString()));
+            if (gotKey == null) {
+                return Optional.empty();
+            }
+            return curTx.get(JKleppmannTreeNode.class, gotKey).flatMap(node -> {
+                var meta = (JKleppmannTreeNodeMetaPeer) node.meta();
+                return remoteTx.getData(PeerInfo.class, meta.getPeerId());
+            });
+        });
+    }
+
+    public List<PeerInfo> getPeers() {
+        return jObjectTxManager.run(() -> {
+            var gotKey = getTree().traverse(List.of());
+            return curTx.get(JKleppmannTreeNode.class, gotKey).map(
+                            node -> node.children().keySet().stream()
+                                    .map(PeerId::of).map(this::getPeerInfo)
+                                    .map(Optional::get).toList())
+                    .orElseThrow();
+        });
+    }
+
+    public List<PeerInfo> getPeersNoSelf() {
+        return jObjectTxManager.run(() -> {
+            var gotKey = getTree().traverse(List.of());
+            return curTx.get(JKleppmannTreeNode.class, gotKey).map(
+                            node -> node.children().keySet().stream()
+                                    .map(PeerId::of).map(this::getPeerInfo)
+                                    .map(Optional::get).filter(
+                                            peerInfo -> !peerInfo.id().equals(persistentPeerDataService.getSelfUuid())).toList())
+                    .orElseThrow();
+        });
+    }
+
+    public void putPeer(PeerId id, byte[] cert) {
+        jObjectTxManager.run(() -> {
+            var parent = getTree().traverse(List.of());
+            var newPeerInfo = new PeerInfo(id, cert);
+            remoteTx.putData(newPeerInfo);
+            getTree().move(parent, new JKleppmannTreeNodeMetaPeer(newPeerInfo.id()), getTree().getNewNodeId());
+        });
+    }
+
+    public void removePeer(PeerId id) {
+        jObjectTxManager.run(() -> {
+            var gotKey = getTree().traverse(List.of(id.toString()));
+            if (gotKey == null) {
+                return;
+            }
+            var meta = curTx.get(JKleppmannTreeNode.class, gotKey).map(node -> (JKleppmannTreeNodeMetaPeer) node.meta()).orElse(null);
+            if (meta == null) {
+                Log.warn("Peer " + id + " not found in the tree");
+                return;
+            }
+            getTree().trash(meta, id.toJObjectKey());
+        });
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/ApiPeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/ApiPeerInfo.java
new file mode 100644
index 00000000..e84f4d83
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/ApiPeerInfo.java
@@ -0,0 +1,4 @@
+package com.usatiuk.dhfs.objects.repository.peersync.api;
+
+public record ApiPeerInfo(String selfUuid, String cert) {
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApi.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApi.java
new file mode 100644
index 00000000..f3d07189
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApi.java
@@ -0,0 +1,26 @@
+package com.usatiuk.dhfs.objects.repository.peersync.api;
+
+import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
+import jakarta.inject.Inject;
+import jakarta.ws.rs.GET;
+import jakarta.ws.rs.Path;
+
+import java.security.cert.CertificateEncodingException;
+import java.util.Base64;
+
+@Path("/peer-info")
+public class PeerSyncApi {
+    @Inject
+    PersistentPeerDataService persistentPeerDataService;
+
+    @Path("self")
+    @GET
+    public ApiPeerInfo getSelfInfo() {
+        try {
+            return new ApiPeerInfo(persistentPeerDataService.getSelfUuid().toString(),
+                    Base64.getEncoder().encodeToString(persistentPeerDataService.getSelfCertificate().getEncoded()));
+        } catch (CertificateEncodingException e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClient.java
new file mode 100644
index 00000000..49a04ac6
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClient.java
@@ -0,0 +1,11 @@
+package com.usatiuk.dhfs.objects.repository.peersync.api;
+
+import jakarta.ws.rs.GET;
+import jakarta.ws.rs.Path;
+
+@Path("/peer-info")
+public interface PeerSyncApiClient {
+    @Path("self")
+    @GET
+    ApiPeerInfo getSelfInfo();
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClientDynamic.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClientDynamic.java
new file mode 100644
index 00000000..c09262e9
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClientDynamic.java
@@ -0,0 +1,28 @@
+package com.usatiuk.dhfs.objects.repository.peersync.api;
+
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.IpPeerAddress;
+import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerAddress;
+import io.quarkus.rest.client.reactive.QuarkusRestClientBuilder;
+import jakarta.enterprise.context.ApplicationScoped;
+
+import java.net.URI;
+import java.util.concurrent.TimeUnit;
+
+@ApplicationScoped
+public class PeerSyncApiClientDynamic {
+    public ApiPeerInfo getSelfInfo(PeerAddress addr) {
+        return switch (addr) {
+            case IpPeerAddress ipAddr -> getSelfInfo(ipAddr.address().getHostAddress(), ipAddr.port());
+            default -> throw new IllegalArgumentException("Unsupported peer address type: " + addr.getClass());
+        };
+    }
+
+    private ApiPeerInfo getSelfInfo(String address, int port) {
+        var client = QuarkusRestClientBuilder.newBuilder()
+                .baseUri(URI.create("http://" + address + ":" + port))
+                .connectTimeout(5, TimeUnit.SECONDS)
+                .readTimeout(5, TimeUnit.SECONDS)
+                .build(PeerSyncApiClient.class);
+        return client.getSelfInfo();
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/structs/JKleppmannTreeNodeMetaPeer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/structs/JKleppmannTreeNodeMetaPeer.java
new file mode 100644
index 00000000..a9ea1800
--- /dev/null
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/structs/JKleppmannTreeNodeMetaPeer.java
@@ -0,0 +1,41 @@
+package com.usatiuk.dhfs.objects.repository.peersync.structs;
+
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
+
+import java.util.Objects;
+
+//@ProtoMirror(JKleppmannTreeNodeMetaFileP.class)
+public class JKleppmannTreeNodeMetaPeer extends JKleppmannTreeNodeMeta {
+    private final JObjectKey _peerId;
+
+    public JKleppmannTreeNodeMetaPeer(PeerId id) {
+        super(id.toString());
+        _peerId = id.toJObjectKey();
+    }
+
+    public JObjectKey getPeerId() {
+        return _peerId;
+    }
+
+    @Override
+    public JKleppmannTreeNodeMeta withName(String name) {
+        assert false;
+        return this;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        if (!super.equals(o)) return false;
+        JKleppmannTreeNodeMetaPeer that = (JKleppmannTreeNodeMetaPeer) o;
+        return Objects.equals(_peerId, that._peerId);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.hashCode(), _peerId);
+    }
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java
index 2d3914f0..d2e1c4bf 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java
@@ -1,6 +1,7 @@
 package com.usatiuk.dhfs.objects.repository.peertrust;

-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
+import com.usatiuk.dhfs.objects.PeerId;
+import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService;
 import io.quarkus.logging.Log;
 import io.quarkus.security.credential.CertificateCredential;
 import io.quarkus.security.identity.AuthenticationRequestContext;
@@ -11,13 +12,12 @@
 import io.smallrye.mutiny.Uni;
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.inject.Inject;

-import java.util.UUID;
 import java.util.function.Supplier;

 @ApplicationScoped
 public class PeerRolesAugmentor implements SecurityIdentityAugmentor {
     @Inject
-    PersistentPeerDataService persistentPeerDataService;
+    PeerInfoService peerInfoService;

     @Override
     public Uni<SecurityIdentity> augment(SecurityIdentity identity, AuthenticationRequestContext context) {
@@ -33,9 +33,9 @@ public class PeerRolesAugmentor implements SecurityIdentityAugmentor {
             var uuid = identity.getPrincipal().getName().substring(3);

             try {
-                var entry = persistentPeerDataService.getHost(UUID.fromString(uuid));
+                var entry = peerInfoService.getPeerInfo(PeerId.of(uuid));

-                if (!entry.getCertificate().equals(identity.getCredential(CertificateCredential.class).getCertificate())) {
+                if (!entry.get().parsedCert().equals(identity.getCredential(CertificateCredential.class).getCertificate())) {
                     Log.error("Certificate mismatch for " + uuid);
                     return () -> identity;
                 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java
index ae0d8359..26573abb 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java
@@ -1,6 +1,6 @@
 package com.usatiuk.dhfs.objects.repository.peertrust;

-import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo;
+import com.usatiuk.dhfs.objects.repository.peersync.PeerInfo;
 import io.quarkus.logging.Log;
 import jakarta.enterprise.context.ApplicationScoped;
 import org.apache.commons.lang3.tuple.Pair;
@@ -8,9 +8,7 @@
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.TrustManagerFactory;
 import javax.net.ssl.X509TrustManager;
-import java.io.IOException;
 import java.security.KeyStore;
-import java.security.KeyStoreException;
 import java.security.NoSuchAlgorithmException;
 import java.security.cert.CertificateException;
 import java.security.cert.X509Certificate;
@@ -36,17 +34,17 @@ public class PeerTrustManager implements X509TrustManager {
         return trustManager.get().getAcceptedIssuers();
     }

-    public synchronized void reloadTrustManagerHosts(Collection<PersistentPeerInfo> hosts) {
+    public synchronized void reloadTrustManagerHosts(Collection<PeerInfo> hosts) {
         try {
             Log.info("Trying to reload trust manager: " + hosts.size() + " known hosts");
             reloadTrustManager(hosts.stream().map(hostInfo ->
-                    Pair.of(hostInfo.getUuid().toString(), hostInfo.getCertificate())).toList());
+                    Pair.of(hostInfo.id().toString(), hostInfo.parsedCert())).toList());
         } catch (Exception e) {
             throw new RuntimeException(e);
         }
     }

-    private synchronized void reloadTrustManager(Collection<Pair<String, X509Certificate>> certs) throws KeyStoreException, NoSuchAlgorithmException, CertificateException, IOException {
+    private synchronized void reloadTrustManager(Collection<Pair<String, X509Certificate>> certs) throws Exception {
         KeyStore ts = KeyStore.getInstance(KeyStore.getDefaultType());
         ts.load(null, null);
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java
index 3232b9f0..0dbf2687 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java
@@ -1,4 +1,4 @@
 package com.usatiuk.dhfs.objects.repository.webapi;

-public record AvailablePeerInfo(String uuid, String addr, int port) {
+public record AvailablePeerInfo(String uuid) {
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java
index 4d8a3102..43c1aa88 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java
@@ -1,7 +1,8 @@
 package com.usatiuk.dhfs.objects.repository.webapi;

+import com.usatiuk.dhfs.objects.PeerId;
 import com.usatiuk.dhfs.objects.repository.PeerManager;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
+import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService;
 import jakarta.inject.Inject;
 import jakarta.ws.rs.DELETE;
 import jakarta.ws.rs.GET;
@@ -10,37 +11,35 @@
 import jakarta.ws.rs.Path;

 import java.util.Collection;
 import java.util.List;
-import java.util.UUID;

 @Path("/objects-manage")
 public class ManagementApi {
     @Inject
-    PeerManager remoteHostManager;
-
+    PeerInfoService peerInfoService;
     @Inject
-    PersistentPeerDataService persistentPeerDataService;
+    PeerManager peerManager;

     @Path("known-peers")
     @GET
     public List<KnownPeerInfo> knownPeers() {
-        return persistentPeerDataService.getHostsNoNulls().stream().map(h -> new KnownPeerInfo(h.getUuid().toString())).toList();
+        return peerInfoService.getPeers().stream().map(peerInfo -> new KnownPeerInfo(peerInfo.id().toString())).toList();
     }

     @Path("known-peers")
     @PUT
     public void addPeer(KnownPeerPut knownPeerPut) {
-        remoteHostManager.addRemoteHost(UUID.fromString(knownPeerPut.uuid()));
+        peerManager.addRemoteHost(PeerId.of(knownPeerPut.uuid()));
     }

     @Path("known-peers")
     @DELETE
-    public void DeletePeer(KnownPeerDelete knownPeerDelete) {
-        remoteHostManager.removeRemoteHost(UUID.fromString(knownPeerDelete.uuid()));
+    public void deletePeer(KnownPeerDelete knownPeerDelete) {
+        peerManager.removeRemoteHost(PeerId.of(knownPeerDelete.uuid()));
     }

     @Path("available-peers")
     @GET
     public Collection<AvailablePeerInfo> availablePeers() {
-        return remoteHostManager.getSeenButNotAddedHosts();
+        return peerManager.getSeenButNotAddedHosts();
     }
 }
diff --git a/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto b/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto
index 0f93fdd5..4681a0be 100644
--- a/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto
+++ b/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto
@@ -6,150 +6,28 @@ option java_outer_classname = "DhfsObjectPersistence";

 package dhfs.objects.persistence;

-message ObjectMetadataP {
-    string name = 1;
-    map<string, uint64> remoteCopies = 2;
-    string knownClass = 3;
-    bool seen = 4;
-    bool deleted = 5;
-    repeated string confirmedDeletes = 6;
-    repeated string referrers = 7;
-    map<string, uint64> changelog = 8;
-    repeated string savedRefs = 9;
-    bool frozen = 10;
-    bool haveLocalCopy = 11;
-}
-
-message FsNodeP {
-    string uuid = 1;
-    int64 mode = 2;
-    int64 ctime = 3;
-    int64 mtime = 4;
-}
-
-message FilePChunksEntry {
-    int64 start = 1;
-    string id = 2;
-}
-
-message FileP {
-    FsNodeP fsNode = 1;
-    repeated FilePChunksEntry chunks = 2;
-    bool symlink = 3;
-    int64 size = 4;
-}
-
-message DirectoryP {
-    FsNodeP fsNode = 1;
-    map<string, string> children = 2;
-}
-
-message ChunkDataP {
-    string name = 1;
-    bytes data = 2;
-}
-
-message PeerDirectoryP {
-    repeated string peers = 1;
-}
-
-message PersistentPeerInfoP {
-    string uuid = 1;
-    bytes cert = 2;
-}
-
-message JKleppmannTreeNodeMetaFileP {
-    string name = 1;
-    string fileIno = 2;
-}
-
-message JKleppmannTreeNodeMetaDirectoryP {
+message JObjectKeyP {
     string name = 1;
 }

-message JKleppmannTreeNodeMetaP {
-    oneof meta {
-        JKleppmannTreeNodeMetaFileP jKleppmannTreeNodeMetaFile = 1;
-        JKleppmannTreeNodeMetaDirectoryP jKleppmannTreeNodeMetaDirectory = 2;
+message PeerIdP {
+    JObjectKeyP id = 1;
+}
+
+message ObjectChangelog {
+    message entries_Entry {
+        PeerIdP key = 1;
+        int64 value = 2;
     }
+    repeated entries_Entry entries = 1;
 }

-message JKleppmannTreeOpP {
-    int64 timestamp = 1;
-    string peer = 2;
-    string newParentId = 3;
-    JKleppmannTreeNodeMetaP meta = 4;
-    string child = 5;
+// TODO: Serialization
+
+message JDataRemoteP {
+    bytes serializedData = 1;
 }

-message JKleppmannTreeNodePChildrenEntry {
-    string key = 1;
-    string value = 2;
-}
-
-message JKleppmannTreeNodeP {
-    optional string parent = 1;
-    string id = 2;
-    repeated JKleppmannTreeNodePChildrenEntry children = 3;
-    optional JKleppmannTreeNodeMetaP meta = 4;
-    optional JKleppmannTreeOpP lastEffectiveOp = 5;
-}
-
-message JKleppmannTreePersistentDataPQueueEntry {
-    int64 clock = 1;
-    string uuid = 2;
-    JKleppmannTreeOpP op = 3;
-}
-
-message JKleppmannTreePersistentDataPQueue {
-    string node = 1;
-    repeated JKleppmannTreePersistentDataPQueueEntry entries = 2;
-}
-
-message JKleppmannTreePersistentDataPTimestampEntry {
-    string host = 1;
-    int64 timestamp = 2;
-}
-
-message JKleppmannTreeOpLogEffectP {
-    optional JKleppmannTreeOpP oldEffectiveMove = 1;
-    optional string oldParent = 2;
-    optional JKleppmannTreeNodeMetaP oldMeta = 3;
-    JKleppmannTreeOpP effectiveOp = 4;
-    string newParentId = 5;
-    JKleppmannTreeNodeMetaP newMeta = 6;
-    string selfId = 7;
-}
-
-message JKleppmannTreeOpLogPEntry {
-    int64 clock = 1;
-    string uuid = 2;
-    JKleppmannTreeOpP op = 3;
-    repeated JKleppmannTreeOpLogEffectP effects = 4;
-}
-
-message JKleppmannTreePersistentDataP {
-    string treeName = 1;
-    int64 clock = 2;
-    repeated JKleppmannTreePersistentDataPQueue queues = 3;
-    repeated JKleppmannTreePersistentDataPTimestampEntry peerLog = 4;
-    repeated JKleppmannTreeOpLogPEntry opLog = 5;
-}
-
-message PeerDirectoryLocalP {
-    repeated string initialOpSyncDonePeers = 1;
-    repeated string initialObjSyncDonePeers = 2;
-}
-
-message JObjectDataP {
-    oneof obj {
-        FileP file = 2;
-        DirectoryP directory = 3;
-        ChunkDataP chunkData = 5;
-        PeerDirectoryP peerDirectory = 6;
-        PersistentPeerInfoP persistentPeerInfo = 7;
-        JKleppmannTreeNodeP jKleppmannTreeNode = 8;
-        JKleppmannTreePersistentDataP jKleppmannTreePersistentData = 9;
-        PeerDirectoryLocalP peerDirectoryLocal = 10;
-    }
+message JDataP {
+    bytes serializedData = 1;
 }
\ No newline at end of file
diff --git a/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto b/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto
index 8ef94946..2baccab9 100644
--- a/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto
+++ b/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto
@@ -9,94 +9,45 @@ option java_outer_classname = "DhfsObjectSyncApi";

 package dhfs.objects.sync;

 service DhfsObjectSyncGrpc {
+    rpc OpPush (OpPushRequest) returns (OpPushReply) {}
+
     rpc GetObject (GetObjectRequest) returns (GetObjectReply) {}
     rpc CanDelete (CanDeleteRequest) returns (CanDeleteReply) {}
-    rpc IndexUpdate (IndexUpdatePush) returns (IndexUpdateReply) {}
-    rpc OpPush (OpPushMsg) returns (OpPushReply) {}
     rpc Ping (PingRequest) returns (PingReply) {}
 }

-message PingRequest {
-    string selfUuid = 1;
-}
+message PingRequest {}

-message PingReply {
-    string selfUuid = 1;
-}
-
-message ObjectChangelogEntry {
-    string host = 1;
-    uint64 version = 2;
-}
-
-message ObjectChangelog {
-    repeated ObjectChangelogEntry entries = 1;
-}
-
-message ObjectHeader {
-    string name = 2;
-    ObjectChangelog changelog = 5;
-    optional dhfs.objects.persistence.JObjectDataP pushedData = 6;
-}
-
-message ApiObject {
-    ObjectHeader header = 1;
-    dhfs.objects.persistence.JObjectDataP content = 2;
-}
+message PingReply {}

 message GetObjectRequest {
-    string selfUuid = 10;
-
-    string name = 2;
+    dhfs.objects.persistence.JObjectKeyP name = 2;
 }

 message GetObjectReply {
-    string selfUuid = 10;
-
-    ApiObject object = 1;
+    dhfs.objects.persistence.ObjectChangelog changelog = 5;
+    dhfs.objects.persistence.JDataRemoteP pushedData = 6;
 }

 message CanDeleteRequest {
-    string selfUuid = 10;
-
-    string name = 2;
-    repeated string ourReferrers = 3;
+    dhfs.objects.persistence.JObjectKeyP name = 2;
+    repeated dhfs.objects.persistence.JObjectKeyP ourReferrers = 3;
 }

 message CanDeleteReply {
-    string selfUuid = 10;
-
     string objName = 1;
     bool deletionCandidate = 2;
-    repeated string referrers = 3;
+    repeated dhfs.objects.persistence.JObjectKeyP referrers = 3;
 }

-message IndexUpdatePush {
-    string selfUuid = 10;
-
-    ObjectHeader header = 1;
-}
-
-message IndexUpdateReply {}
-
-message JKleppmannTreePeriodicPushOpP {
-    string fromUuid = 1;
-    int64 timestamp = 2;
-}
-
-message OpPushPayload {
-    oneof payload {
-        dhfs.objects.persistence.JKleppmannTreeOpP jKleppmannTreeOpWrapper = 1;
-        JKleppmannTreePeriodicPushOpP jKleppmannTreePeriodicPushOp = 2;
-    }
-}
-
-message OpPushMsg {
-    string selfUuid = 10;
-    string queueId = 1;
-    repeated OpPushPayload msg = 2;
+message OpPushRequest {
+    repeated OpP msg = 2;
 }

 message OpPushReply {
-}
\ No newline at end of file
+}
+
+message OpP {
+    bytes serializedData = 1;
+}
diff --git a/dhfs-parent/server/src/main/resources/application.properties b/dhfs-parent/server/src/main/resources/application.properties
index 1fb7caa0..4f499e77 100644
--- a/dhfs-parent/server/src/main/resources/application.properties
+++ b/dhfs-parent/server/src/main/resources/application.properties
@@ -1,16 +1,16 @@
quarkus.grpc.server.use-separate-server=false -dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs -dhfs.objects.root=${HOME}/dhfs_default/data/stuff dhfs.objects.peerdiscovery.port=42069 -dhfs.objects.peerdiscovery.interval=5000 +dhfs.objects.peerdiscovery.interval=5s +dhfs.objects.peerdiscovery.broadcast=true dhfs.objects.sync.timeout=30 dhfs.objects.sync.ping.timeout=5 -dhfs.objects.invalidation.threads=4 +dhfs.objects.invalidation.threads=1 dhfs.objects.invalidation.delay=1000 dhfs.objects.reconnect_interval=5s dhfs.objects.write_log=false dhfs.objects.periodic-push-op-interval=5m dhfs.fuse.root=${HOME}/dhfs_default/fuse +dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff dhfs.fuse.debug=false dhfs.fuse.enabled=true dhfs.files.allow_recursive_delete=false @@ -22,13 +22,6 @@ dhfs.files.write_merge_limit=1.2 # Don't take blocks of this size and above when merging dhfs.files.write_merge_max_chunk_to_take=1 dhfs.files.write_last_chunk_limit=1.5 -dhfs.objects.writeback.delay=100 -dhfs.objects.writeback.limit=134217728 -dhfs.objects.lru.limit=134217728 -dhfs.objects.lru.print-stats=false -dhfs.objects.writeback.watermark-high=0.6 -dhfs.objects.writeback.watermark-low=0.4 -dhfs.objects.writeback.threads=4 dhfs.objects.deletion.delay=1000 dhfs.objects.deletion.can-delete-retry-delay=10000 dhfs.objects.ref_verification=true @@ -40,6 +33,7 @@ dhfs.objects.ref-processor.threads=4 dhfs.objects.opsender.batch-size=100 dhfs.objects.lock_timeout_secs=2 dhfs.local-discovery=true +dhfs.peerdiscovery.timeout=5000 quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE quarkus.log.category."com.usatiuk.dhfs".level=TRACE quarkus.http.insecure-requests=enabled diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java index 03f74be5..e5a9f59b 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java @@ -9,7 +9,8 @@ import java.util.HashMap; import java.util.Map; abstract public class TempDataProfile implements QuarkusTestProfile { - protected void getConfigOverrides(Map toPut) {} + protected void getConfigOverrides(Map toPut) { + } @Override final public Map getConfigOverrides() { @@ -21,7 +22,6 @@ abstract public class TempDataProfile implements QuarkusTestProfile { } var ret = new HashMap(); ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString()); - ret.put("dhfs.objects.root", tempDirWithPrefix.resolve("dhfs_root_d_test").toString()); ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString()); getConfigOverrides(ret); return ret; diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java index 2a6979a6..b3659d01 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java @@ -17,13 +17,10 @@ import java.util.Objects; public class TestDataCleaner { @ConfigProperty(name = "dhfs.objects.persistence.files.root") String tempDirectory; - @ConfigProperty(name = "dhfs.objects.root") - String tempDirectoryIdx; void init(@Observes @Priority(1) StartupEvent event) throws IOException { try { purgeDirectory(Path.of(tempDirectory).toFile()); - purgeDirectory(Path.of(tempDirectoryIdx).toFile()); } catch (Exception 
ignored) { Log.warn("Couldn't cleanup test data on init"); } @@ -31,7 +28,6 @@ public class TestDataCleaner { void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException { purgeDirectory(Path.of(tempDirectory).toFile()); - purgeDirectory(Path.of(tempDirectoryIdx).toFile()); } void purgeDirectory(File dir) { diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java index 96acf3f5..504c1cc6 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java @@ -3,6 +3,7 @@ package com.usatiuk.dhfs.benchmarks; import com.google.protobuf.UnsafeByteOperations; import com.usatiuk.dhfs.TempDataProfile; import com.usatiuk.dhfs.files.service.DhfsFileService; +import com.usatiuk.dhfs.objects.JObjectKey; import io.quarkus.test.junit.QuarkusTest; import io.quarkus.test.junit.TestProfile; import jakarta.inject.Inject; @@ -41,7 +42,7 @@ public class DhfsFileBenchmarkTest { @Test @Disabled void writeMbTest() { - String file = dhfsFileService.create("/writeMbTest", 0777).get(); + JObjectKey file = dhfsFileService.create("/writeMbTest", 0777).get(); var bb = ByteBuffer.allocateDirect(1024 * 1024); Benchmarker.runAndPrintMixSimple("dhfsFileService.write(\"\")", () -> { diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java index 8bea5c7e..6d02c516 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java @@ -1,25 +1,18 @@ package com.usatiuk.dhfs.files; -import com.google.protobuf.ByteString; import com.usatiuk.dhfs.TempDataProfile; -import com.usatiuk.dhfs.files.objects.ChunkData; import com.usatiuk.dhfs.files.objects.File; import com.usatiuk.dhfs.files.service.DhfsFileService; -import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; +import com.usatiuk.dhfs.objects.RemoteTransaction; +import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.kleppmanntree.AlreadyExistsException; import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Test; import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; - -import static org.awaitility.Awaitility.await; class Profiles { public static class DhfsFileServiceSimpleTestProfile extends TempDataProfile { @@ -50,62 +43,66 @@ public class DhfsFileServiceSimpleTestImpl { @Inject DhfsFileService fileService; @Inject - JObjectManager jObjectManager; + Transaction curTx; @Inject - JObjectTxManager jObjectTxManager; + TransactionManager jObjectTxManager; + @Inject + RemoteTransaction remoteTx; - @Test - void readTest() { - var fuuid = UUID.randomUUID(); - { - ChunkData c1 = new ChunkData(ByteString.copyFrom("12345".getBytes())); - ChunkData c2 = new ChunkData(ByteString.copyFrom("678".getBytes())); - ChunkData c3 = 
new ChunkData(ByteString.copyFrom("91011".getBytes())); - File f = new File(fuuid, 777, false); - f.getChunks().put(0L, c1.getName()); - f.getChunks().put((long) c1.getBytes().size(), c2.getName()); - f.getChunks().put((long) c1.getBytes().size() + c2.getBytes().size(), c3.getName()); +// @Test +// void readTest() { +// var fuuid = UUID.randomUUID(); +// { +// ChunkData c1 = new ChunkData(ByteString.copyFrom("12345".getBytes())); +// ChunkData c2 = new ChunkData(ByteString.copyFrom("678".getBytes())); +// ChunkData c3 = new ChunkData(ByteString.copyFrom("91011".getBytes())); +// File f = new File(fuuid, 777, false); +// f.chunks().put(0L, c1.getName()); +// f.chunks().put((long) c1.getBytes().size(), c2.getName()); +// f.chunks().put((long) c1.getBytes().size() + c2.getBytes().size(), c3.getName()); +// +// // FIXME: dhfs_files +// +// var c1o = new AtomicReference(); +// var c2o = new AtomicReference(); +// var c3o = new AtomicReference(); +// var fo = new AtomicReference(); +// +// jObjectTxManager.executeTx(() -> { +// c1o.set(curTx.put(c1, Optional.of(f.getName())).getMeta().getName()); +// c2o.set(curTx.put(c2, Optional.of(f.getName())).getMeta().getName()); +// c3o.set(curTx.put(c3, Optional.of(f.getName())).getMeta().getName()); +// fo.set(curTx.put(f, Optional.empty()).getMeta().getName()); +// }); +// +// var all = jObjectManager.findAll(); +// Assertions.assertTrue(all.contains(c1o.get())); +// Assertions.assertTrue(all.contains(c2o.get())); +// Assertions.assertTrue(all.contains(c3o.get())); +// Assertions.assertTrue(all.contains(fo.get())); +// } +// +// String all = "1234567891011"; +// +// { +// for (int start = 0; start < all.length(); start++) { +// for (int end = start; end <= all.length(); end++) { +// var read = fileService.read(fuuid.toString(), start, end - start); +// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray()); +// } +// } +// } +// } - // FIXME: dhfs_files - - var c1o = new AtomicReference(); - var c2o = new AtomicReference(); - var c3o = new AtomicReference(); - var fo = new AtomicReference(); - - jObjectTxManager.executeTx(() -> { - c1o.set(jObjectManager.put(c1, Optional.of(f.getName())).getMeta().getName()); - c2o.set(jObjectManager.put(c2, Optional.of(f.getName())).getMeta().getName()); - c3o.set(jObjectManager.put(c3, Optional.of(f.getName())).getMeta().getName()); - fo.set(jObjectManager.put(f, Optional.empty()).getMeta().getName()); - }); - - var all = jObjectManager.findAll(); - Assertions.assertTrue(all.contains(c1o.get())); - Assertions.assertTrue(all.contains(c2o.get())); - Assertions.assertTrue(all.contains(c3o.get())); - Assertions.assertTrue(all.contains(fo.get())); - } - - String all = "1234567891011"; - - { - for (int start = 0; start < all.length(); start++) { - for (int end = start; end <= all.length(); end++) { - var read = fileService.read(fuuid.toString(), start, end - start); - Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray()); - } - } - } - } - - @Test + @RepeatedTest(100) void dontMkdirTwiceTest() { Assertions.assertDoesNotThrow(() -> fileService.mkdir("/dontMkdirTwiceTest", 777)); Assertions.assertThrows(AlreadyExistsException.class, () -> fileService.mkdir("/dontMkdirTwiceTest", 777)); + fileService.unlink("/dontMkdirTwiceTest"); + Assertions.assertFalse(fileService.open("/dontMkdirTwiceTest").isPresent()); } - @Test + @RepeatedTest(100) void writeTest() { var ret = fileService.create("/writeTest", 777); Assertions.assertTrue(ret.isPresent()); @@ 
-114,6 +111,7 @@ public class DhfsFileServiceSimpleTestImpl { fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).get().toByteArray()); fileService.write(uuid, 4, new byte[]{10, 11, 12}); Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); fileService.write(uuid, 10, new byte[]{13, 14}); @@ -122,6 +120,9 @@ public class DhfsFileServiceSimpleTestImpl { Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); fileService.write(uuid, 3, new byte[]{17, 18}); Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); + + fileService.unlink("/writeTest"); + Assertions.assertFalse(fileService.open("/writeTest").isPresent()); } @Test @@ -153,19 +154,23 @@ public class DhfsFileServiceSimpleTestImpl { Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray()); } - @Test + @RepeatedTest(100) void truncateTest2() { var ret = fileService.create("/truncateTest2", 777); - Assertions.assertTrue(ret.isPresent()); + try { + Assertions.assertTrue(ret.isPresent()); - var uuid = ret.get(); + var uuid = ret.get(); - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - fileService.truncate(uuid, 20); - fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray()); + fileService.truncate(uuid, 20); + fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray()); + } finally { + fileService.unlink("/truncateTest2"); + } } @Test @@ -213,9 +218,12 @@ public class DhfsFileServiceSimpleTestImpl { fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}); Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray()); - var oldfile = jObjectManager.get(ret2.get()).orElseThrow(IllegalStateException::new); - var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); - var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); + + jObjectTxManager.run(() -> { + var oldfile = remoteTx.getData(File.class, ret2.get()).orElseThrow(IllegalStateException::new); +// var chunk = oldfile.chunks().get(0L); +// var chunkObj = remoteTx.getData(ChunkData.class, chunk).orElseThrow(IllegalStateException::new); + }); Assertions.assertTrue(fileService.rename("/moveOverTest1", "/moveOverTest2")); 
Assertions.assertFalse(fileService.open("/moveOverTest1").isPresent()); @@ -224,14 +232,13 @@ public class DhfsFileServiceSimpleTestImpl { Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray()); - await().atMost(5, TimeUnit.SECONDS).until(() -> { - try { - return chunkObj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> !m.getReferrers().contains(uuid)); - } catch (DeletedObjectAccessException ignored) { - return true; - } - }); +// await().atMost(5, TimeUnit.SECONDS).until(() -> { +// jObjectTxManager.run(() -> { +// +// return chunkObj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, +// (m, d) -> !m.getReferrers().contains(uuid)); +// }); +// }); } @Test @@ -270,13 +277,13 @@ public class DhfsFileServiceSimpleTestImpl { fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new); - var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); - var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); - - chunkObj.runReadLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - Assertions.assertTrue(m.getReferrers().contains(uuid)); - }); +// var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new); +// var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); +// var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); +// +// chunkObj.runReadLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { +// Assertions.assertTrue(m.getReferrers().contains(uuid)); +// }); Assertions.assertTrue(fileService.rename("/moveTest2", "/movedTest2")); Assertions.assertFalse(fileService.open("/moveTest2").isPresent()); diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java index b9d9f92d..bdb70a8e 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java @@ -59,8 +59,8 @@ public class DhfsFuseIT { Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS); var c1curl = container1.execInContainer("/bin/sh", "-c", "curl --header \"Content-Type: application/json\" " + diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java index b401b053..089cee37 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java +++ 
b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java
@@ -84,9 +84,9 @@ public class DhfsFusex3IT {
         Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
         Assertions.assertDoesNotThrow(() -> UUID.fromString(c3uuid));
 
-        waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2);
-        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2);
-        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2);
+        waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS, 2);
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS, 2);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS, 2);
 
         var c1curl = container1.execInContainer("/bin/sh", "-c",
                 "curl --header \"Content-Type: application/json\" " +
diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java
index 5bec10e9..9583493a 100644
--- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java
+++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java
@@ -12,10 +12,11 @@ import java.util.concurrent.TimeoutException;
 
 public class DhfsImage implements Future<String> {
 
+    private static final DhfsImage INSTANCE = new DhfsImage();
     private static String _builtImage = null;
-    private static DhfsImage INSTANCE = new DhfsImage();
 
-    private DhfsImage() {}
+    private DhfsImage() {
+    }
 
     public static DhfsImage getInstance() {
         return INSTANCE;
@@ -68,7 +69,8 @@ public class DhfsImage implements Future<String> {
                 .cmd("java", "-ea", "-Xmx128M",
                         "--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED",
                         "--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED",
-                        "-Ddhfs.objects.peerdiscovery.interval=100",
+                        "--add-opens=java.base/java.nio=ALL-UNNAMED",
+                        "-Ddhfs.objects.peerdiscovery.interval=1s",
                         "-Ddhfs.objects.invalidation.delay=100",
                         "-Ddhfs.objects.deletion.delay=0",
                         "-Ddhfs.objects.deletion.can-delete-retry-delay=1000",
diff --git a/dhfs-parent/server/src/test/resources/application.properties b/dhfs-parent/server/src/test/resources/application.properties
index 64f51835..ecef50b6 100644
--- a/dhfs-parent/server/src/test/resources/application.properties
+++ b/dhfs-parent/server/src/test/resources/application.properties
@@ -8,4 +8,5 @@ quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
 quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib
 quarkus.http.test-port=0
 quarkus.http.test-ssl-port=0
-dhfs.local-discovery=false
\ No newline at end of file
+dhfs.local-discovery=false
+dhfs.objects.persistence.snapshot-extra-checks=true
\ No newline at end of file
diff --git a/dhfs-parent/supportlib/src/main/java/com/usatiuk/dhfs/supportlib/UninitializedByteBuffer.java b/dhfs-parent/supportlib/src/main/java/com/usatiuk/dhfs/supportlib/UninitializedByteBuffer.java
index c5f16629..42616fda 100644
--- a/dhfs-parent/supportlib/src/main/java/com/usatiuk/dhfs/supportlib/UninitializedByteBuffer.java
+++ b/dhfs-parent/supportlib/src/main/java/com/usatiuk/dhfs/supportlib/UninitializedByteBuffer.java
@@ -9,20 +9,24 @@ public class UninitializedByteBuffer {
     private static final Logger LOGGER = Logger.getLogger(UninitializedByteBuffer.class.getName());
     public static ByteBuffer allocateUninitialized(int size) {
-        if (size < DhfsSupport.PAGE_SIZE)
-            return ByteBuffer.allocateDirect(size);
+        try {
+            if (size < DhfsSupport.PAGE_SIZE)
+                return ByteBuffer.allocateDirect(size);
 
-        var bb = new ByteBuffer[1];
-        long token = DhfsSupport.allocateUninitializedByteBuffer(bb, size);
-        var ret = bb[0];
-        CLEANER.register(ret, () -> {
-            try {
-                DhfsSupport.releaseByteBuffer(token);
-            } catch (Throwable e) {
-                LOGGER.severe("Error releasing buffer: " + e);
-                System.exit(-1);
-            }
-        });
-        return ret;
+            var bb = new ByteBuffer[1];
+            long token = DhfsSupport.allocateUninitializedByteBuffer(bb, size);
+            var ret = bb[0];
+            CLEANER.register(ret, () -> {
+                try {
+                    DhfsSupport.releaseByteBuffer(token);
+                } catch (Throwable e) {
+                    LOGGER.severe("Error releasing buffer: " + e);
+                    System.exit(-1);
+                }
+            });
+            return ret;
+        } catch (OutOfMemoryError e) {
+            return ByteBuffer.allocate(size);
+        }
     }
 }
diff --git a/dhfs-parent/utils/pom.xml b/dhfs-parent/utils/pom.xml
new file mode 100644
index 00000000..4e59f908
--- /dev/null
+++ b/dhfs-parent/utils/pom.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>com.usatiuk.dhfs</groupId>
+        <artifactId>parent</artifactId>
+        <version>1.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>utils</artifactId>
+
+    <properties>
+        <maven.compiler.source>21</maven.compiler.source>
+        <maven.compiler.target>21</maven.compiler.target>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-junit5</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-arc</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-grpc</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.junit.jupiter</groupId>
+            <artifactId>junit-jupiter-engine</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.jboss.slf4j</groupId>
+            <artifactId>slf4j-jboss-logmanager</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+        </dependency>
+    </dependencies>
+</project>
\ No newline at end of file
diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/AutoCloseableNoThrow.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/AutoCloseableNoThrow.java
new file mode 100644
index 00000000..29ec47ca
--- /dev/null
+++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/AutoCloseableNoThrow.java
@@ -0,0 +1,6 @@
+package com.usatiuk.dhfs.utils;
+
+public interface AutoCloseableNoThrow extends AutoCloseable {
+    @Override
+    void close();
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/utils/ByteUtils.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/ByteUtils.java
similarity index 93%
rename from dhfs-parent/server/src/main/java/com/usatiuk/utils/ByteUtils.java
rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/ByteUtils.java
index f7075b40..dba58508 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/utils/ByteUtils.java
+++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/ByteUtils.java
@@ -1,4 +1,4 @@
-package com.usatiuk.utils;
+package com.usatiuk.dhfs.utils;
 
 import java.nio.ByteBuffer;
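The DataLocker class added below provides per-key mutual exclusion: lock(key) blocks while another thread holds the key, is reentrant within the owning thread (the DUMMY_LOCK fast path), and leak-checks forgotten releases through a Cleaner. A usage sketch (the key and the guarded work are made up for illustration):

    DataLocker locker = new DataLocker();
    try (var lock = locker.lock("object-key-1")) {
        // At most one thread at a time runs here per distinct key;
        // re-locking "object-key-1" on the same thread does not deadlock.
    } // close() marks the tag released and wakes any waiters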
diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java
new file mode 100644
index 00000000..1292e235
--- /dev/null
+++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java
@@ -0,0 +1,74 @@
+package com.usatiuk.dhfs.utils;
+
+import io.quarkus.logging.Log;
+
+import java.lang.ref.Cleaner;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class DataLocker {
+    private static final AutoCloseableNoThrow DUMMY_LOCK = () -> {
+    };
+    private final ConcurrentHashMap<Object, LockTag> _locks = new ConcurrentHashMap<>();
+
+    public AutoCloseableNoThrow lock(Object data) {
+        while (true) {
+            try {
+                var tag = _locks.get(data);
+                if (tag != null) {
+                    synchronized (tag) {
+                        if (!tag.released) {
+                            if (tag.owner == Thread.currentThread()) {
+                                return DUMMY_LOCK;
+                            }
+                            tag.wait();
+                        }
+                        continue;
+                    }
+                }
+            } catch (InterruptedException ignored) {
+            }
+
+            var newTag = new LockTag();
+            var oldTag = _locks.putIfAbsent(data, newTag);
+            if (oldTag == null) {
+                return new Lock(data, newTag);
+            }
+        }
+    }
+
+    private static class LockTag {
+        final Thread owner = Thread.currentThread();
+        //        final StackTraceElement[] _creationStack = Thread.currentThread().getStackTrace();
+        boolean released = false;
+    }
+
+    private class Lock implements AutoCloseableNoThrow {
+        private static final Cleaner CLEANER = Cleaner.create();
+        private final Object _key;
+        private final LockTag _tag;
+
+        public Lock(Object key, LockTag tag) {
+            _key = key;
+            _tag = tag;
+            CLEANER.register(this, () -> {
+                if (!tag.released) {
+                    Log.error("Lock collected without release: " + key);
+                }
+            });
+        }
+
+        @Override
+        public void close() {
+            synchronized (_tag) {
+                if (_tag.released)
+                    return;
+                _tag.released = true;
+                // Notify all because when the object is locked again,
+                // it's a different lock tag
+                _tag.notifyAll();
+                _locks.remove(_key, _tag);
+            }
+        }
+    }
+
+}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/utils/HashSetDelayedBlockingQueue.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java
similarity index 98%
rename from dhfs-parent/server/src/main/java/com/usatiuk/utils/HashSetDelayedBlockingQueue.java
rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java
index 51d23509..e37aa9ea 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/utils/HashSetDelayedBlockingQueue.java
+++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java
@@ -1,7 +1,6 @@
-package com.usatiuk.utils;
+package com.usatiuk.dhfs.utils;
 
 import jakarta.annotation.Nullable;
-import lombok.Getter;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -11,15 +10,24 @@ import java.util.function.Function;
 public class HashSetDelayedBlockingQueue<T> {
     private final LinkedHashMap<T, SetElement<T>> _set = new LinkedHashMap<>();
     private final Object _sleepSynchronizer = new Object();
-    @Getter
     private long _delay;
-
     private boolean _closed = false;
 
     public HashSetDelayedBlockingQueue(long delay) {
         _delay = delay;
     }
 
+    public long getDelay() {
+        return _delay;
+    }
+
+    public void setDelay(long delay) {
+        synchronized (_sleepSynchronizer) {
+            _delay = delay;
+            _sleepSynchronizer.notifyAll();
+        }
+    }
+
     // If there's object with key in the queue, don't do anything
     // Returns whether it was added or not
     public boolean add(T el) {
@@ -250,13 +258,6 @@ public class HashSetDelayedBlockingQueue<T> {
         return out;
     }
 
-    public void setDelay(long delay) {
-        synchronized (_sleepSynchronizer) {
-            _delay = delay;
-            _sleepSynchronizer.notifyAll();
-        }
-    }
-
     private record SetElement<T>(T el, long time) {
     }
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/SerializationHelper.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/SerializationHelper.java
similarity index 90%
rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/SerializationHelper.java
rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/SerializationHelper.java
index 977b2307..d285a821 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/SerializationHelper.java
+++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/SerializationHelper.java
@@ -1,8 +1,7 @@
-package com.usatiuk.dhfs;
+package com.usatiuk.dhfs.utils;
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.UnsafeByteOperations;
-import com.usatiuk.dhfs.files.objects.File;
 import org.apache.commons.io.input.ClassLoaderObjectInputStream;
 import org.apache.commons.lang3.SerializationUtils;
@@ -12,10 +11,9 @@ import java.io.InputStream;
 import java.io.Serializable;
 
 public abstract class SerializationHelper {
-    // Taken from SerializationUtils
     public static <T> T deserialize(final InputStream inputStream) {
-        try (ClassLoaderObjectInputStream in = new ClassLoaderObjectInputStream(File.class.getClassLoader(), inputStream)) {
+        try (ClassLoaderObjectInputStream in = new ClassLoaderObjectInputStream(SerializationHelper.class.getClassLoader(), inputStream)) {
             final T obj = (T) in.readObject();
             return obj;
         } catch (IOException | ClassNotFoundException e) {
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/utils/StatusRuntimeExceptionNoStacktrace.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/StatusRuntimeExceptionNoStacktrace.java
similarity index 94%
rename from dhfs-parent/server/src/main/java/com/usatiuk/utils/StatusRuntimeExceptionNoStacktrace.java
rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/StatusRuntimeExceptionNoStacktrace.java
index 963da69d..40897edc 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/utils/StatusRuntimeExceptionNoStacktrace.java
+++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/StatusRuntimeExceptionNoStacktrace.java
@@ -1,4 +1,4 @@
-package com.usatiuk.utils;
+package com.usatiuk.dhfs.utils;
 
 import io.grpc.Metadata;
 import io.grpc.Status;
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/utils/VoidFn.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFn.java
similarity index 68%
rename from dhfs-parent/server/src/main/java/com/usatiuk/utils/VoidFn.java
rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFn.java
index 46f4ff0c..e20d6707 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/utils/VoidFn.java
+++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFn.java
@@ -1,4 +1,4 @@
-package com.usatiuk.utils;
+package com.usatiuk.dhfs.utils;
 
 @FunctionalInterface
 public interface VoidFn {
diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFnThrows.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFnThrows.java
new file mode 100644
index 00000000..64dfe36f
--- /dev/null
+++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFnThrows.java
@@ -0,0 +1,7 @@
+package com.usatiuk.dhfs.utils;
+
+@FunctionalInterface
+public interface VoidFnThrows {
+    void apply() throws Throwable;
+}
+
diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/utils/HashSetDelayedBlockingQueueTest.java b/dhfs-parent/utils/src/test/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueueTest.java
similarity index 99%
rename from dhfs-parent/server/src/test/java/com/usatiuk/utils/HashSetDelayedBlockingQueueTest.java
rename to dhfs-parent/utils/src/test/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueueTest.java
index d68998cf..70f36cc9 100644
--- a/dhfs-parent/server/src/test/java/com/usatiuk/utils/HashSetDelayedBlockingQueueTest.java
+++ b/dhfs-parent/utils/src/test/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueueTest.java
@@ -1,4 +1,4 @@
-package com.usatiuk.utils;
+package com.usatiuk.dhfs.utils;
 
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
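One detail of the SerializationHelper move above: deserialization now resolves classes against SerializationHelper's own class loader instead of a class from the server module (the old File.class anchor), which keeps it usable from the standalone utils artifact under Quarkus's split class loading. A round-trip sketch (MyData and the use of commons-lang3 SerializationUtils for the write side are assumptions for illustration):

    import java.io.ByteArrayInputStream;
    import org.apache.commons.lang3.SerializationUtils;

    record MyData(String name) implements java.io.Serializable {}

    byte[] bytes = SerializationUtils.serialize(new MyData("example"));            // plain JDK serialization
    MyData back = SerializationHelper.deserialize(new ByteArrayInputStream(bytes)); // classloader-aware read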
diff --git a/webui/src/api/dto.ts b/webui/src/api/dto.ts
index ac35aa0a..dd90c71a 100644
--- a/webui/src/api/dto.ts
+++ b/webui/src/api/dto.ts
@@ -39,8 +39,8 @@ export type TTokenToResp = z.infer<typeof TokenToResp>;
 // AvailablePeerInfo
 export const AvailablePeerInfoTo = z.object({
     uuid: z.string(),
-    addr: z.string(),
-    port: z.number(),
+    // addr: z.string(),
+    // port: z.number(),
 });
 
 export type TAvailablePeerInfoTo = z.infer<typeof AvailablePeerInfoTo>;
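With addr and port dropped on both sides, the discovery information exposed to the web UI is reduced to the peer id. For comparison, the server-side counterpart from the AvailablePeerInfo diff earlier in this change (the JSON shape assumes the REST layer's default record-to-JSON binding):

    // Java record backing the endpoint:
    public record AvailablePeerInfo(String uuid) {}

    // Wire format the zod schema above now validates:
    // {"uuid": "123e4567-e89b-12d3-a456-426614174000"}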