Mirror of https://github.com/usatiuk/dhfs.git (synced 2025-10-28 20:47:49 +01:00)
.github/workflows/server.yml (vendored, 26 lines changed)
@@ -54,12 +54,12 @@ jobs:
 #      - name: Build with Maven
 #        run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG

-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         with:
           name: DHFS Server Package
           path: dhfs-parent/server/target/quarkus-app

-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         if: ${{ always() }}
         with:
           name: Test logs
@@ -84,7 +84,7 @@ jobs:
       - name: NPM Build
         run: cd webui && npm run build

-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         with:
           name: Webui
           path: webui/dist
@@ -155,7 +155,7 @@ jobs:
           CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" libdhfs_support/builder/cross-build.sh both build "$(pwd)/result"

       - name: Upload build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: NativeLib-${{ matrix.os }}-${{ env.SANITIZED_DOCKER_PLATFORM }}
           path: result
@@ -168,7 +168,7 @@ jobs:
         uses: actions/checkout@v4

       - name: Download artifacts
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           path: downloaded-libs

@@ -180,7 +180,7 @@ jobs:
           test -f "result/Linux-x86_64/libdhfs_support.so" || exit 1

       - name: Upload
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: NativeLibs
           path: result
@@ -201,19 +201,19 @@ jobs:
         uses: actions/checkout@v4

       - name: Download server package
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: DHFS Server Package
           path: dhfs-package-downloaded

       - name: Download webui
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: Webui
           path: webui-dist-downloaded

       - name: Download native libs
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: NativeLibs
           path: dhfs-native-downloaded
@@ -299,17 +299,17 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v4

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
         with:
           name: DHFS Server Package
           path: dhfs-package-downloaded

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
         with:
           name: Webui
           path: webui-dist-downloaded

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
         with:
           name: NativeLibs
           path: dhfs-native-downloaded
@@ -339,7 +339,7 @@ jobs:
         run: tar -cvf ~/run-wrapper.tar.gz ./run-wrapper-out

       - name: Upload
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: Run wrapper
           path: ~/run-wrapper.tar.gz

@@ -34,11 +34,6 @@
             <groupId>org.apache.commons</groupId>
             <artifactId>commons-collections4</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.projectlombok</groupId>
-            <artifactId>lombok</artifactId>
-            <scope>provided</scope>
-        </dependency>
     </dependencies>

     <build>
@@ -68,11 +68,11 @@ class AutoprotomapProcessor {
             }
         } catch (Throwable e) {
             StringBuilder sb = new StringBuilder();
-            sb.append(e.toString() + "\n");
+            sb.append(e + "\n");
             for (var el : e.getStackTrace()) {
                 sb.append(el.toString() + "\n");
             }
-            System.out.println(sb.toString());
+            System.out.println(sb);
         }
     }
 }
@@ -14,6 +14,7 @@ import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Objects;
 import java.util.function.Consumer;
+import java.util.function.Function;
 import java.util.function.IntConsumer;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
@@ -61,7 +62,7 @@ public class ProtoSerializerGenerator {
             visitor.accept(cur);

             var next = cur.superClassType().name();
-            if (next.equals(DotName.OBJECT_NAME)) break;
+            if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break;
             cur = index.getClassByName(next);
         }
     }
@@ -82,6 +83,10 @@ public class ProtoSerializerGenerator {

         var objectClass = index.getClassByName(objectType.name().toString());

+        Function<String, String> getterGetter = objectClass.isRecord()
+                ? Function.identity()
+                : s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX));
+
         for (var f : findAllFields(index, objectClass)) {
             var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX);

@@ -89,7 +94,7 @@ public class ProtoSerializerGenerator {
             if ((f.flags() & Opcodes.ACC_PUBLIC) != 0)
                 return bytecodeCreator.readInstanceField(f, object);
             else {
-                var fieldGetter = "get" + capitalize(stripPrefix(f.name(), FIELD_PREFIX));
+                var fieldGetter = getterGetter.apply(f.name());
                 return bytecodeCreator.invokeVirtualMethod(
                         MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object);
             }

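Note: the getterGetter change above hinges on Java's two accessor conventions. A record component is exposed under the component's own name, while the bean convention the generator previously hard-coded prefixes "get"; hence Function.identity() for records. A minimal illustration (hypothetical names, not from the repository):

    record Point(int x) {}                  // accessor: x()

    class PointBean {
        private int _x;                     // field "_x" -> stripPrefix -> "x"
        public int getX() { return _x; }    // accessor: getX()
    }
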
@@ -22,10 +22,6 @@
             <artifactId>lombok</artifactId>
             <scope>provided</scope>
         </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-resteasy-reactive</artifactId>
-        </dependency>
         <dependency>
             <groupId>com.usatiuk</groupId>
             <artifactId>autoprotomap</artifactId>
@@ -41,11 +37,6 @@
             <artifactId>quarkus-junit5</artifactId>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>io.rest-assured</groupId>
-            <artifactId>rest-assured</artifactId>
-            <scope>test</scope>
-        </dependency>
         <dependency>
             <groupId>io.quarkus</groupId>
             <artifactId>quarkus-grpc</artifactId>

@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.usatiuk.autoprotomap.it;
-
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.ws.rs.GET;
-import jakarta.ws.rs.Path;
-
-@Path("/autoprotomap")
-@ApplicationScoped
-public class AutoprotomapResource {
-    // add some rest methods here
-
-    @GET
-    public String hello() {
-        return "Hello autoprotomap";
-    }
-}
@@ -0,0 +1,8 @@
+package com.usatiuk.autoprotomap.it;
+
+import com.usatiuk.autoprotomap.runtime.ProtoMirror;
+
+@ProtoMirror(InterfaceObjectProto.class)
+public interface InterfaceObject {
+    String key();
+}
@@ -0,0 +1,7 @@
+package com.usatiuk.autoprotomap.it;
+
+import com.usatiuk.autoprotomap.runtime.ProtoMirror;
+
+@ProtoMirror(RecordObjectProto.class)
+public record RecordObject(String key) implements InterfaceObject {
+}
@@ -0,0 +1,7 @@
+package com.usatiuk.autoprotomap.it;
+
+import com.usatiuk.autoprotomap.runtime.ProtoMirror;
+
+@ProtoMirror(RecordObject2Proto.class)
+public record RecordObject2(String key, int value) implements InterfaceObject {
+}
@@ -28,4 +28,20 @@ message AbstractProto {
         SimpleObjectProto simpleObject = 2;
         CustomObjectProto customObject = 3;
     }
 }
+
+message RecordObjectProto {
+    string key = 1;
+}
+
+message RecordObject2Proto {
+    string key = 1;
+    int32 value = 2;
+}
+
+message InterfaceObjectProto {
+    oneof obj {
+        RecordObjectProto recordObject = 1;
+        RecordObject2Proto recordObject2 = 2;
+    }
+}
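Note: the oneof above is what lets a single InterfaceObjectProto carry either record. Assuming standard protobuf Java codegen for these messages (setRecordObject/setRecordObject2 builder setters), a hand-written equivalent of the dispatch the generated serializer performs would look roughly like:

    static InterfaceObjectProto toProto(InterfaceObject obj) {
        var builder = InterfaceObjectProto.newBuilder();
        if (obj instanceof RecordObject r)
            builder.setRecordObject(RecordObjectProto.newBuilder().setKey(r.key()));
        else if (obj instanceof RecordObject2 r)
            builder.setRecordObject2(RecordObject2Proto.newBuilder().setKey(r.key()).setValue(r.value()));
        else
            throw new IllegalArgumentException("Unhandled subtype: " + obj);
        return builder.build();
    }
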
@@ -16,6 +16,8 @@ public class AutoprotomapResourceTest {
     ProtoSerializer<NestedObjectProto, NestedObject> nestedProtoSerializer;
     @Inject
     ProtoSerializer<AbstractProto, AbstractObject> abstractProtoSerializer;
+    @Inject
+    ProtoSerializer<InterfaceObjectProto, InterfaceObject> interfaceProtoSerializer;

     @Test
     public void testSimple() {
@@ -74,7 +76,7 @@ public class AutoprotomapResourceTest {
     }

     @Test
-    public void tesAbstractNested() {
+    public void testAbstractNested() {
         var ret = abstractProtoSerializer.serialize(
                 new NestedObject(
                         new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
@@ -93,4 +95,19 @@ public class AutoprotomapResourceTest {
         Assertions.assertEquals("nested obj", des.get_nestedName());
         Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
     }
+
+    @Test
+    public void testInterface() {
+        var ret = interfaceProtoSerializer.serialize(new RecordObject("record test"));
+        Assertions.assertEquals("record test", ret.getRecordObject().getKey());
+        var des = (RecordObject) interfaceProtoSerializer.deserialize(ret);
+        Assertions.assertEquals("record test", des.key());
+
+        var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234));
+        Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey());
+        Assertions.assertEquals(1234, ret2.getRecordObject2().getValue());
+        var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2);
+        Assertions.assertEquals("record test 2", des2.key());
+        Assertions.assertEquals(1234, des2.value());
+    }
 }

@@ -13,19 +13,22 @@
     <artifactId>kleppmanntree</artifactId>

     <dependencies>
-        <dependency>
-            <groupId>org.projectlombok</groupId>
-            <artifactId>lombok</artifactId>
-            <scope>provided</scope>
-        </dependency>
         <dependency>
             <groupId>org.junit.jupiter</groupId>
             <artifactId>junit-jupiter-engine</artifactId>
             <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.commons</groupId>
             <artifactId>commons-collections4</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.pcollections</groupId>
+            <artifactId>pcollections</artifactId>
+        </dependency>
     </dependencies>
 </project>
@@ -18,11 +18,6 @@ public class AtomicClock implements Clock<Long>, Serializable {
         _max = timestamp;
     }

-    // FIXME:
-    public void ungetTimestamp() {
-        --_max;
-    }
-
     @Override
     public Long peekTimestamp() {
         return _max;

@@ -8,15 +8,16 @@ import java.util.function.Function;
 import java.util.logging.Level;
 import java.util.logging.Logger;

-public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT, WrapperT extends TreeNodeWrapper<TimestampT, PeerIdT, MetaT, NodeIdT>> {
+public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
     private static final Logger LOGGER = Logger.getLogger(KleppmannTree.class.getName());
-    private final StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT, WrapperT> _storage;
+
+    private final StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> _storage;
     private final PeerInterface<PeerIdT> _peers;
     private final Clock<TimestampT> _clock;
     private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
-    private HashMap<NodeIdT, WrapperT> _undoCtx = null;
+    private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;

-    public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT, WrapperT> storage,
+    public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
                          PeerInterface<PeerIdT> peers,
                          Clock<TimestampT> clock,
                          OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> opRecorder) {
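Note: the dropped WrapperT parameter is the theme of the whole refactor below. The rLock/rwLock/freeze wrapper protocol is replaced by immutable TreeNode snapshots held in persistent maps, so every update becomes "read a snapshot, derive a new version, putNode". A minimal sketch of the persistent-map behaviour this relies on (org.pcollections, as added to the poms):

    import org.pcollections.HashTreePMap;
    import org.pcollections.PMap;

    class PersistentMapDemo {
        public static void main(String[] args) {
            PMap<String, Long> children = HashTreePMap.<String, Long>empty().plus("a", 1L);
            PMap<String, Long> updated = children.plus("b", 2L).minus("a");
            // The original map is untouched: a reader holding "children" keeps a
            // consistent snapshot while a writer publishes "updated" via putNode().
            System.out.println(children); // {a=1}
            System.out.println(updated);  // {b=2}
        }
    }
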
@@ -30,13 +31,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
         if (names.isEmpty()) return fromId;

         var from = _storage.getById(fromId);
-        from.rLock();
         NodeIdT childId;
-        try {
-            childId = from.getNode().getChildren().get(names.getFirst());
-        } finally {
-            from.rUnlock();
-        }
+        childId = from.children().get(names.getFirst());

         if (childId == null)
             return null;
@@ -45,69 +41,58 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
     }

     public NodeIdT traverse(NodeIdT fromId, List<String> names) {
-        _storage.rLock();
-        try {
-            return traverseImpl(fromId, names.subList(1, names.size()));
-        } finally {
-            _storage.rUnlock();
-        }
+        return traverseImpl(fromId, names.subList(1, names.size()));
     }

     public NodeIdT traverse(List<String> names) {
-        _storage.rLock();
-        try {
-            return traverseImpl(_storage.getRootId(), names);
-        } finally {
-            _storage.rUnlock();
-        }
+        return traverseImpl(_storage.getRootId(), names);
     }

     private void undoEffect(LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT> effect) {
-        _storage.assertRwLock();
         if (effect.oldInfo() != null) {
             var node = _storage.getById(effect.childId());
-            var oldParent = _storage.getById(effect.oldInfo().oldParent());
             var curParent = _storage.getById(effect.newParentId());
-            curParent.rwLock();
-            oldParent.rwLock();
-            node.rwLock();
-            try {
-                curParent.getNode().getChildren().remove(node.getNode().getMeta().getName());
-                if (!node.getNode().getMeta().getClass().equals(effect.oldInfo().oldMeta().getClass()))
-                    throw new IllegalArgumentException("Class mismatch for meta for node " + node.getNode().getId());
-                node.getNode().setMeta(effect.oldInfo().oldMeta());
-                node.getNode().setParent(oldParent.getNode().getId());
-                oldParent.getNode().getChildren().put(node.getNode().getMeta().getName(), node.getNode().getId());
-                node.notifyRmRef(curParent.getNode().getId());
-                node.notifyRef(oldParent.getNode().getId());
-                node.getNode().setLastEffectiveOp(effect.oldInfo().oldEffectiveMove());
-            } finally {
-                node.rwUnlock();
-                oldParent.rwUnlock();
-                curParent.rwUnlock();
+            {
+                var newCurParentChildren = curParent.children().minus(node.meta().getName());
+                curParent = curParent.withChildren(newCurParentChildren);
+                _storage.putNode(curParent);
             }
+
+            if (!node.meta().getClass().equals(effect.oldInfo().oldMeta().getClass()))
+                throw new IllegalArgumentException("Class mismatch for meta for node " + node.key());
+
+            // Needs to be read after changing curParent, as it might be the same node
+            var oldParent = _storage.getById(effect.oldInfo().oldParent());
+            {
+                var newOldParentChildren = oldParent.children().plus(node.meta().getName(), node.key());
+                oldParent = oldParent.withChildren(newOldParentChildren);
+                _storage.putNode(oldParent);
+            }
+            _storage.putNode(
+                    node.withMeta(effect.oldInfo().oldMeta())
+                            .withParent(effect.oldInfo().oldParent())
+                            .withLastEffectiveOp(effect.oldInfo().oldEffectiveMove())
+            );
         } else {
             var node = _storage.getById(effect.childId());
             var curParent = _storage.getById(effect.newParentId());
-            curParent.rwLock();
-            node.rwLock();
-            try {
-                curParent.getNode().getChildren().remove(node.getNode().getMeta().getName());
-                node.freeze();
-                node.getNode().setParent(null);
-                node.getNode().setLastEffectiveOp(null);
-                node.notifyRmRef(curParent.getNode().getId());
-                _undoCtx.put(node.getNode().getId(), node);
-            } finally {
-                node.rwUnlock();
-                curParent.rwUnlock();
+            {
+                var newCurParentChildren = curParent.children().minus(node.meta().getName());
+                curParent = curParent.withChildren(newCurParentChildren);
+                _storage.putNode(curParent);
             }
+            _storage.putNode(
+                    node.withParent(null)
+                            .withLastEffectiveOp(null)
+            );
+            _undoCtx.put(node.key(), node);
         }
     }

     private void undoOp(LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
-        for (var e : op.effects().reversed())
-            undoEffect(e);
+        if (op.effects() != null)
+            for (var e : op.effects().reversed())
+                undoEffect(e);
     }

     private void redoOp(Map.Entry<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> entry) {
@@ -116,7 +101,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
     }

     private void doAndPut(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
-        _storage.assertRwLock();
         var res = doOp(op, failCreatingIfExists);
         _storage.getLog().put(res.op().timestamp(), res);
     }
@@ -160,22 +144,15 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
         }
         if (!inTrash.isEmpty()) {
             var trash = _storage.getById(_storage.getTrashId());
-            trash.rwLock();
-            try {
-                for (var n : inTrash) {
-                    var node = _storage.getById(n);
-                    node.rwLock();
-                    try {
-                        if (trash.getNode().getChildren().remove(n.toString()) == null)
-                            LOGGER.severe("Node " + node.getNode().getId() + " not found in trash but should be there");
-                        node.notifyRmRef(trash.getNode().getId());
-                    } finally {
-                        node.rwUnlock();
-                    }
-                    _storage.removeNode(n);
+            for (var n : inTrash) {
+                var node = _storage.getById(n);
+                {
+                    if (!trash.children().containsKey(n.toString()))
+                        LOGGER.severe("Node " + node.key() + " not found in trash but should be there");
+                    trash = trash.withChildren(trash.children().minus(n.toString()));
+                    _storage.putNode(trash);
                 }
-            } finally {
-                trash.rwUnlock();
+                _storage.removeNode(n);
             }
         } else {
@@ -188,29 +165,18 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
     }

     public void move(NodeIdT newParent, MetaT newMeta, NodeIdT child, boolean failCreatingIfExists) {
-        _storage.rwLock();
-        try {
-            var createdMove = createMove(newParent, newMeta, child);
-            _opRecorder.recordOp(createdMove);
-            applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists);
-        } finally {
-            _storage.rwUnlock();
-        }
+        var createdMove = createMove(newParent, newMeta, child);
+        _opRecorder.recordOp(createdMove);
+        applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists);
     }

     public void applyExternalOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
-        _storage.rwLock();
-        try {
-            _clock.updateTimestamp(op.timestamp().timestamp());
-            applyOp(from, op, false);
-        } finally {
-            _storage.rwUnlock();
-        }
+        _clock.updateTimestamp(op.timestamp().timestamp());
+        applyOp(from, op, false);
     }

     // Returns true if the timestamp is newer than what's seen, false otherwise
     private boolean updateTimestampImpl(PeerIdT from, TimestampT newTimestamp) {
-        _storage.assertRwLock();
         TimestampT oldRef = _storage.getPeerTimestampLog().getForPeer(from);
         if (oldRef != null && oldRef.compareTo(newTimestamp) > 0) { // FIXME?
             LOGGER.warning("Wrong op order: received older than known from " + from.toString());
@@ -221,31 +187,18 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
     }

     public boolean updateExternalTimestamp(PeerIdT from, TimestampT timestamp) {
-        _storage.rLock();
-        try {
-            // TODO: Ideally no point in this separate locking?
-            var gotExt = _storage.getPeerTimestampLog().getForPeer(from);
-            var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId());
-            if ((gotExt != null && gotExt.compareTo(timestamp) >= 0)
-                    && (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false;
-        } finally {
-            _storage.rUnlock();
-        }
-        _storage.rwLock();
-        try {
-            updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack?
-            updateTimestampImpl(from, timestamp);
-            tryTrimLog();
-        } finally {
-            _storage.rwUnlock();
-        }
+        // TODO: Ideally no point in this separate locking?
+        var gotExt = _storage.getPeerTimestampLog().getForPeer(from);
+        var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId());
+        if ((gotExt != null && gotExt.compareTo(timestamp) >= 0)
+                && (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false;
+        updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack?
+        updateTimestampImpl(from, timestamp);
+        tryTrimLog();
         return true;
     }

     private void applyOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
-        _storage.assertRwLock();
-
         if (!updateTimestampImpl(from, op.timestamp().timestamp())) return;

         var log = _storage.getLog();
@@ -276,7 +229,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
         if (!_undoCtx.isEmpty()) {
             for (var e : _undoCtx.entrySet()) {
                 LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
-                e.getValue().unfreeze();
                 _storage.removeNode(e.getKey());
             }
         }
@@ -292,12 +244,10 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
     }

     private CombinedTimestamp<TimestampT, PeerIdT> getTimestamp() {
-        _storage.assertRwLock();
         return new CombinedTimestamp<>(_clock.getTimestamp(), _peers.getSelfId());
     }

     private <LocalMetaT extends MetaT> OpMove<TimestampT, PeerIdT, LocalMetaT, NodeIdT> createMove(NodeIdT newParent, LocalMetaT newMeta, NodeIdT node) {
-        _storage.assertRwLock();
         return new OpMove<>(getTimestamp(), newParent, newMeta, node);
     }

@@ -317,91 +267,73 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
         return computed;
     }

-    private WrapperT getNewNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> desired) {
-        _storage.assertRwLock();
+    private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
         if (_undoCtx != null) {
-            var node = _undoCtx.get(desired.getId());
+            var node = _undoCtx.get(key);
             if (node != null) {
-                node.rwLock();
                 try {
-                    if (!node.getNode().getChildren().isEmpty()) {
-                        LOGGER.log(Level.WARNING, "Not empty children for undone node " + desired.getId());
+                    if (!node.children().isEmpty()) {
+                        LOGGER.log(Level.WARNING, "Not empty children for undone node " + key);
                     }
-                    node.getNode().setParent(desired.getParent());
-                    node.notifyRef(desired.getParent());
-                    node.getNode().setMeta(desired.getMeta());
-                    node.unfreeze();
+                    node = node.withParent(parent).withMeta(meta);
                 } catch (Exception e) {
-                    LOGGER.log(Level.SEVERE, "Error while fixing up node " + desired.getId(), e);
-                    node.rwUnlock();
+                    LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
                     node = null;
                 }
             }
             if (node != null) {
-                _undoCtx.remove(desired.getId());
+                _undoCtx.remove(key);
                 return node;
            }
        }
-        return _storage.createNewNode(desired);
+        return _storage.createNewNode(key, parent, meta);
     }

     private void applyEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> sourceOp, List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {
-        _storage.assertRwLock();
         for (var effect : effects) {
-            WrapperT oldParentNode = null;
-            WrapperT newParentNode;
-            WrapperT node;
+            TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> oldParentNode = null;
+            TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> newParentNode;
+            TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node;

-            newParentNode = _storage.getById(effect.newParentId());
-            newParentNode.rwLock();
-            try {
-                if (effect.oldInfo() != null) {
-                    oldParentNode = _storage.getById(effect.oldInfo().oldParent());
-                    oldParentNode.rwLock();
-                }
-                try {
-                    if (oldParentNode == null) {
-                        node = getNewNode(new TreeNode<>(effect.childId(), effect.newParentId(), effect.newMeta()));
-                    } else {
-                        node = _storage.getById(effect.childId());
-                        node.rwLock();
-                    }
-                    try {
-
-                        if (oldParentNode != null) {
-                            oldParentNode.getNode().getChildren().remove(effect.oldInfo().oldMeta().getName());
-                            node.notifyRmRef(effect.oldInfo().oldParent());
-                        }
-
-                        newParentNode.getNode().getChildren().put(effect.newMeta().getName(), effect.childId());
-                        if (effect.newParentId().equals(_storage.getTrashId()) &&
-                                !Objects.equals(effect.newMeta().getName(), effect.childId()))
-                            throw new IllegalArgumentException("Move to trash should have id of node as name");
-                        node.getNode().setParent(effect.newParentId());
-                        node.getNode().setMeta(effect.newMeta());
-                        node.getNode().setLastEffectiveOp(effect.effectiveOp());
-                        node.notifyRef(effect.newParentId());
-
-                    } finally {
-                        node.rwUnlock();
-                    }
-                } finally {
-                    if (oldParentNode != null)
-                        oldParentNode.rwUnlock();
-                }
-            } finally {
-                newParentNode.rwUnlock();
+            if (effect.oldInfo() != null) {
+                oldParentNode = _storage.getById(effect.oldInfo().oldParent());
+            }
+            if (oldParentNode == null) {
+                node = getNewNode(effect.childId(), effect.newParentId(), effect.newMeta());
+            } else {
+                node = _storage.getById(effect.childId());
+            }
+            if (oldParentNode != null) {
+                var newOldParentChildren = oldParentNode.children().minus(effect.oldInfo().oldMeta().getName());
+                oldParentNode = oldParentNode.withChildren(newOldParentChildren);
+                _storage.putNode(oldParentNode);
             }
+
+            // Needs to be read after changing oldParentNode, as it might be the same node
+            newParentNode = _storage.getById(effect.newParentId());
+
+            {
+                var newNewParentChildren = newParentNode.children().plus(effect.newMeta().getName(), effect.childId());
+                newParentNode = newParentNode.withChildren(newNewParentChildren);
+                _storage.putNode(newParentNode);
+            }
+            if (effect.newParentId().equals(_storage.getTrashId()) &&
+                    !Objects.equals(effect.newMeta().getName(), effect.childId().toString()))
+                throw new IllegalArgumentException("Move to trash should have id of node as name");
+            _storage.putNode(
+                    node.withParent(effect.newParentId())
+                            .withMeta(effect.newMeta())
+                            .withLastEffectiveOp(sourceOp)
+            );
         }
     }

     private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computeEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
-        _storage.assertRwLock();
         var node = _storage.getById(op.childId());

-        NodeIdT oldParentId = (node != null && node.getNode().getParent() != null) ? node.getNode().getParent() : null;
+        NodeIdT oldParentId = (node != null && node.parent() != null) ? node.parent() : null;
         NodeIdT newParentId = op.newParentId();
-        WrapperT newParent = _storage.getById(newParentId);
+        TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> newParent = _storage.getById(newParentId);

         if (newParent == null) {
             LOGGER.log(Level.SEVERE, "New parent not found " + op.newMeta().getName() + " " + op.childId());
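Note: the "needs to be read after changing oldParentNode/curParent" comments above mark the one real trap in this style: with immutable snapshots, a stale reference silently discards a just-persisted derivation. If the old and the new parent are the same node, reusing the snapshot read before the first putNode would resurrect the child entry that was just removed. A hedged illustration (hypothetical storage variable, simplified types, not repository code):

    // Move a child to a new name when oldParent == newParent.
    var parent = storage.getById(parentId);            // snapshot A: {oldName=child}
    storage.putNode(parent.withChildren(parent.children().minus(oldName)));

    // Wrong: "parent" is still snapshot A; deriving from it brings oldName back.
    // Right: re-read the freshly persisted version before deriving again.
    parent = storage.getById(parentId);                // snapshot B: {}
    storage.putNode(parent.withChildren(parent.children().plus(newName, childId)));
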
@@ -409,34 +341,29 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
         }

         if (oldParentId == null) {
-            newParent.rLock();
-            try {
-                var conflictNodeId = newParent.getNode().getChildren().get(op.newMeta().getName());
+            var conflictNodeId = newParent.children().get(op.newMeta().getName());

-                if (conflictNodeId != null) {
-                    if (failCreatingIfExists)
-                        throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId);
+            if (conflictNodeId != null) {
+                if (failCreatingIfExists)
+                    throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId);

-                    var conflictNode = _storage.getById(conflictNodeId);
-                    conflictNode.rLock();
-                    try {
-                        MetaT conflictNodeMeta = conflictNode.getNode().getMeta();
-                        String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.getNode().getId();
-                        String newOursName = op.newMeta().getName() + ".conflict." + op.childId();
-                        return new LogRecord<>(op, List.of(
-                                new LogEffect<>(new LogEffectOld<>(conflictNode.getNode().getLastEffectiveOp(), newParentId, conflictNodeMeta), conflictNode.getNode().getLastEffectiveOp(), newParentId, (MetaT) conflictNodeMeta.withName(newConflictNodeName), conflictNodeId),
-                                new LogEffect<>(null, op, op.newParentId(), (MetaT) op.newMeta().withName(newOursName), op.childId())
-                        ));
-                    } finally {
-                        conflictNode.rUnlock();
-                    }
-                } else {
-                    return new LogRecord<>(op, List.of(
-                            new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId())
-                    ));
+                var conflictNode = _storage.getById(conflictNodeId);
+                MetaT conflictNodeMeta = conflictNode.meta();
+
+                if (Objects.equals(conflictNodeMeta, op.newMeta())) {
+                    return new LogRecord<>(op, null);
                 }
-            } finally {
-                newParent.rUnlock();
+
+                String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.key();
+                String newOursName = op.newMeta().getName() + ".conflict." + op.childId();
+                return new LogRecord<>(op, List.of(
+                        new LogEffect<>(new LogEffectOld<>(conflictNode.lastEffectiveOp(), newParentId, conflictNodeMeta), conflictNode.lastEffectiveOp(), newParentId, (MetaT) conflictNodeMeta.withName(newConflictNodeName), conflictNodeId),
+                        new LogEffect<>(null, op, op.newParentId(), (MetaT) op.newMeta().withName(newOursName), op.childId())
+                ));
+            } else {
+                return new LogRecord<>(op, List.of(
+                        new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId())
+                ));
             }
         }

@@ -444,96 +371,69 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
             return new LogRecord<>(op, null);
         }

-        node.rLock();
-        newParent.rLock();
-        try {
-            MetaT oldMeta = node.getNode().getMeta();
-            if (!oldMeta.getClass().equals(op.newMeta().getClass())) {
-                LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.getNode().getId());
-                return new LogRecord<>(op, null);
-            }
-            var replaceNodeId = newParent.getNode().getChildren().get(op.newMeta().getName());
-            if (replaceNodeId != null) {
-                var replaceNode = _storage.getById(replaceNodeId);
-                try {
-                    replaceNode.rLock();
-                    var replaceNodeMeta = replaceNode.getNode().getMeta();
-                    return new LogRecord<>(op, List.of(
-                            new LogEffect<>(new LogEffectOld<>(replaceNode.getNode().getLastEffectiveOp(), newParentId, replaceNodeMeta), replaceNode.getNode().getLastEffectiveOp(), _storage.getTrashId(), (MetaT) replaceNodeMeta.withName(replaceNodeId.toString()), replaceNodeId),
-                            new LogEffect<>(new LogEffectOld<>(node.getNode().getLastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
-                    ));
-                } finally {
-                    replaceNode.rUnlock();
-                }
-            }
-
-            return new LogRecord<>(op, List.of(
-                    new LogEffect<>(new LogEffectOld<>(node.getNode().getLastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
-            ));
-        } finally {
-            newParent.rUnlock();
-            node.rUnlock();
+        MetaT oldMeta = node.meta();
+        if (!oldMeta.getClass().equals(op.newMeta().getClass())) {
+            LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
+            return new LogRecord<>(op, null);
+        }
+        var replaceNodeId = newParent.children().get(op.newMeta().getName());
+        if (replaceNodeId != null) {
+            var replaceNode = _storage.getById(replaceNodeId);
+            var replaceNodeMeta = replaceNode.meta();
+
+            if (Objects.equals(replaceNodeMeta, op.newMeta())) {
+                return new LogRecord<>(op, null);
+            }
+
+            return new LogRecord<>(op, List.of(
+                    new LogEffect<>(new LogEffectOld<>(replaceNode.lastEffectiveOp(), newParentId, replaceNodeMeta), replaceNode.lastEffectiveOp(), _storage.getTrashId(), (MetaT) replaceNodeMeta.withName(replaceNodeId.toString()), replaceNodeId),
+                    new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
+            ));
         }
+
+        return new LogRecord<>(op, List.of(
+                new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId())
+        ));
     }

     private boolean isAncestor(NodeIdT child, NodeIdT parent) {
         var node = _storage.getById(parent);
         NodeIdT curParent;
-        while ((curParent = node.getNode().getParent()) != null) {
+        while ((curParent = node.parent()) != null) {
             if (Objects.equals(child, curParent)) return true;
             node = _storage.getById(curParent);
         }
         return false;
     }

-    public void walkTree(Consumer<WrapperT> consumer) {
-        _storage.rLock();
-        try {
-            ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
-            queue.push(_storage.getRootId());
+    public void walkTree(Consumer<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> consumer) {
+        ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
+        queue.push(_storage.getRootId());

-            while (!queue.isEmpty()) {
-                var id = queue.pop();
-                var node = _storage.getById(id);
-                if (node == null) continue;
-                node.rLock();
-                try {
-                    queue.addAll(node.getNode().getChildren().values());
-                    consumer.accept(node);
-                } finally {
-                    node.rUnlock();
-                }
-            }
-        } finally {
-            _storage.rUnlock();
+        while (!queue.isEmpty()) {
+            var id = queue.pop();
+            var node = _storage.getById(id);
+            if (node == null) continue;
+            queue.addAll(node.children().values());
+            consumer.accept(node);
         }
     }

-    public Pair<String, NodeIdT> findParent(Function<WrapperT, Boolean> kidPredicate) {
-        _storage.rLock();
-        try {
-            ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
-            queue.push(_storage.getRootId());
+    public Pair<String, NodeIdT> findParent(Function<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>, Boolean> kidPredicate) {
+        ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
+        queue.push(_storage.getRootId());

-            while (!queue.isEmpty()) {
-                var id = queue.pop();
-                var node = _storage.getById(id);
-                if (node == null) continue;
-                node.rLock();
-                try {
-                    var children = node.getNode().getChildren();
-                    for (var childEntry : children.entrySet()) {
-                        var child = _storage.getById(childEntry.getValue());
-                        if (kidPredicate.apply(child)) {
-                            return Pair.of(childEntry.getKey(), node.getNode().getId());
-                        }
-                    }
-                    queue.addAll(children.values());
-                } finally {
-                    node.rUnlock();
+        while (!queue.isEmpty()) {
+            var id = queue.pop();
+            var node = _storage.getById(id);
+            if (node == null) continue;
+            var children = node.children();
+            for (var childEntry : children.entrySet()) {
+                var child = _storage.getById(childEntry.getValue());
+                if (kidPredicate.apply(child)) {
+                    return Pair.of(childEntry.getKey(), node.key());
                 }
             }
-        } finally {
-            _storage.rUnlock();
+            queue.addAll(children.values());
         }
         return null;
     }
@@ -541,27 +441,22 @@
     public void recordBoostrapFor(PeerIdT host) {
         TreeMap<CombinedTimestamp<TimestampT, PeerIdT>, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT>> result = new TreeMap<>();

-        _storage.rwLock();
-        try {
-            walkTree(node -> {
-                var op = node.getNode().getLastEffectiveOp();
-                if (node.getNode().getLastEffectiveOp() == null) return;
-                LOGGER.info("visited bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
-                result.put(node.getNode().getLastEffectiveOp().timestamp(), node.getNode().getLastEffectiveOp());
-            });
+        walkTree(node -> {
+            var op = node.lastEffectiveOp();
+            if (node.lastEffectiveOp() == null) return;
+            LOGGER.info("visited bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
+            result.put(node.lastEffectiveOp().timestamp(), node.lastEffectiveOp());
+        });

-            for (var le : _storage.getLog().getAll()) {
-                var op = le.getValue().op();
-                LOGGER.info("bootstrap op from log for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
-                result.put(le.getKey(), le.getValue().op());
-            }
+        for (var le : _storage.getLog().getAll()) {
+            var op = le.getValue().op();
+            LOGGER.info("bootstrap op from log for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
+            result.put(le.getKey(), le.getValue().op());
+        }

-            for (var op : result.values()) {
-                LOGGER.info("Recording bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
-                _opRecorder.recordOpForPeer(host, op);
-            }
-        } finally {
-            _storage.rwUnlock();
+        for (var op : result.values()) {
+            LOGGER.info("Recording bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
+            _opRecorder.recordOpForPeer(host, op);
         }
     }

@@ -1,9 +1,11 @@
 package com.usatiuk.kleppmanntree;

+import java.io.Serializable;
+
 public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>(
         LogEffectOld<TimestampT, PeerIdT, MetaT, NodeIdT> oldInfo,
         OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> effectiveOp,
         NodeIdT newParentId,
         MetaT newMeta,
-        NodeIdT childId) {
+        NodeIdT childId) implements Serializable {
 }

@@ -1,6 +1,9 @@
 package com.usatiuk.kleppmanntree;

+import java.io.Serializable;
+
 public record LogEffectOld<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
         (OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> oldEffectiveMove,
          NodeIdT oldParent,
-         MetaT oldMeta) {}
+         MetaT oldMeta) implements Serializable {
+}

@@ -1,7 +1,9 @@
 package com.usatiuk.kleppmanntree;

+import java.io.Serializable;
 import java.util.List;

 public record LogRecord<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
         (OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op,
-         List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {}
+         List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) implements Serializable {
+}

@@ -1,5 +1,8 @@
 package com.usatiuk.kleppmanntree;

+import java.io.Serializable;
+
 public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
         (CombinedTimestamp<TimestampT, PeerIdT> timestamp, NodeIdT newParentId, MetaT newMeta,
-         NodeIdT childId) {}
+         NodeIdT childId) implements Serializable {
+}

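Note: LogEffect, LogEffectOld, LogRecord and OpMove become Serializable here so op-log entries can be persisted or shipped between peers as-is; a record serializes through its components, so each component type (timestamps, metas, node ids) must itself be serializable. A standard-JDK round-trip sketch for any such record:

    import java.io.*;

    final class SerializationDemo {
        // Serializes a value to bytes and reads it back, e.g. an OpMove instance.
        @SuppressWarnings("unchecked")
        static <T extends Serializable> T roundTrip(T value) throws IOException, ClassNotFoundException {
            var bytes = new ByteArrayOutputStream();
            try (var out = new ObjectOutputStream(bytes)) {
                out.writeObject(value);
            }
            try (var in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                return (T) in.readObject();
            }
        }
    }
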
@@ -4,32 +4,23 @@ public interface StorageInterface<
         TimestampT extends Comparable<TimestampT>,
         PeerIdT extends Comparable<PeerIdT>,
         MetaT extends NodeMeta,
-        NodeIdT,
-        WrapperT extends TreeNodeWrapper<TimestampT, PeerIdT, MetaT, NodeIdT>> {
+        NodeIdT> {
     NodeIdT getRootId();

     NodeIdT getTrashId();

     NodeIdT getNewNodeId();

-    WrapperT getById(NodeIdT id);
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getById(NodeIdT id);

-    // Creates a node, returned wrapper is RW-locked
-    WrapperT createNewNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node);
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> createNewNode(NodeIdT key, NodeIdT parent, MetaT meta);
+
+    void putNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node);

     void removeNode(NodeIdT id);

     LogInterface<TimestampT, PeerIdT, MetaT, NodeIdT> getLog();

     PeerTimestampLogInterface<TimestampT, PeerIdT> getPeerTimestampLog();
-
-    void rLock();
-
-    void rUnlock();
-
-    void rwLock();
-
-    void rwUnlock();
-
-    void assertRwLock();
 }

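Note: the slimmed-down contract reads as: getById returns an immutable snapshot, createNewNode is a pure factory (it no longer registers anything, as the test implementation below shows), and a node only becomes visible through putNode. A hedged usage sketch of a rename under this contract (hypothetical storage and id variables, generics elided):

    var node = storage.getById(nodeId);          // immutable snapshot
    var parent = storage.getById(node.parent());

    storage.putNode(parent.withChildren(
            parent.children().minus(node.meta().getName())   // unlink old name
                             .plus("newName", node.key()))); // link new name
    storage.putNode(node.withMeta((MetaT) node.meta().withName("newName")));
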
@@ -1,31 +1,26 @@
 package com.usatiuk.kleppmanntree;

-import lombok.Getter;
-import lombok.Setter;
+import org.pcollections.PMap;

-import java.util.HashMap;
+import java.io.Serializable;
 import java.util.Map;

-@Getter
-@Setter
-public class TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
-    private final NodeIdT _id;
-    private NodeIdT _parent = null;
-    private OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> _lastEffectiveOp = null;
-    private MetaT _meta = null;
-    private Map<String, NodeIdT> _children = new HashMap<>();
+public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> extends Serializable {
+    NodeIdT key();

-    public TreeNode(NodeIdT id, NodeIdT parent, MetaT meta) {
-        _id = id;
-        _meta = meta;
-        _parent = parent;
-    }
+    NodeIdT parent();

-    public TreeNode(NodeIdT id, NodeIdT parent, MetaT meta, Map<String, NodeIdT> children) {
-        _id = id;
-        _meta = meta;
-        _parent = parent;
-        _children = children;
-    }
+    OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp();
+
+    MetaT meta();
+
+    PMap<String, NodeIdT> children();
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withParent(NodeIdT parent);
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withLastEffectiveOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp);
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withMeta(MetaT meta);
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withChildren(PMap<String, NodeIdT> children);
 }

@@ -1,21 +0,0 @@
-package com.usatiuk.kleppmanntree;
-
-public interface TreeNodeWrapper<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
-    void rLock();
-
-    void rUnlock();
-
-    void rwLock();
-
-    void rwUnlock();
-
-    void freeze();
-
-    void unfreeze();
-
-    void notifyRef(NodeIdT id);
-
-    void notifyRmRef(NodeIdT id);
-
-    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNode();
-}
@@ -32,8 +32,8 @@ public class KleppmanTreeSimpleTest {
         Assertions.assertEquals(d1id, testNode2._tree.traverse(List.of("Test1")));
         Assertions.assertEquals(d2id, testNode2._tree.traverse(List.of("Test2")));

-        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
-        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
+        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());

         var f1id = testNode1._storageInterface.getNewNodeId();

@@ -54,10 +54,10 @@ public class KleppmanTreeSimpleTest {
         testNode1._tree.move(d1id, new TestNodeMetaDir("Test2"), d2id);
         Assertions.assertEquals(d1id, testNode1._tree.traverse(List.of("Test1")));
         Assertions.assertEquals(d2id, testNode1._tree.traverse(List.of("Test1", "Test2")));
-        Assertions.assertIterableEquals(List.of("Test1"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test1"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());

         testNode2._tree.move(d2id, new TestNodeMetaDir("Test1"), d1id);
-        Assertions.assertIterableEquals(List.of("Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
         Assertions.assertEquals(d2id, testNode2._tree.traverse(List.of("Test2")));
         Assertions.assertEquals(d1id, testNode2._tree.traverse(List.of("Test2", "Test1")));

@@ -72,8 +72,8 @@ public class KleppmanTreeSimpleTest {
         }

         // Second node wins as it has smaller timestamp
-        Assertions.assertIterableEquals(List.of("Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
-        Assertions.assertIterableEquals(List.of("Test1", "TestFile"), testNode1._storageInterface.getById(d2id).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
+        Assertions.assertIterableEquals(List.of("Test1", "TestFile"), testNode1._storageInterface.getById(d2id).children().keySet().stream().sorted().toList());
         Assertions.assertEquals(d2id, testNode1._tree.traverse(List.of("Test2")));
         Assertions.assertEquals(d1id, testNode1._tree.traverse(List.of("Test2", "Test1")));
         Assertions.assertEquals(f1id, testNode1._tree.traverse(List.of("Test2", "TestFile")));
@@ -81,8 +81,8 @@ public class KleppmanTreeSimpleTest {
         var f11 = testNode1._storageInterface.getById(f1id);
         var f12 = testNode2._storageInterface.getById(f1id);

-        Assertions.assertEquals(f11.getNode().getMeta(), f12.getNode().getMeta());
-        Assertions.assertInstanceOf(TestNodeMetaFile.class, f11.getNode().getMeta());
+        Assertions.assertEquals(f11.meta(), f12.meta());
+        Assertions.assertInstanceOf(TestNodeMetaFile.class, f11.meta());

         // Trim test
         Assertions.assertTrue(testNode1._storageInterface.getLog().size() <= 1);

@@ -9,7 +9,7 @@ public class TestNode {
     protected final TestClock _clock;
     protected final TestPeerInterface _peerInterface;
     protected final TestStorageInterface _storageInterface;
-    protected final KleppmannTree<Long, Long, TestNodeMeta, Long, TestNodeWrapper> _tree;
+    protected final KleppmannTree<Long, Long, TestNodeMeta, Long> _tree;
     private final TestOpRecorder _recorder;

     public TestNode(long id) {

@@ -1,12 +1,16 @@
 package com.usatiuk.kleppmanntree;

-import lombok.Getter;
-
 public abstract class TestNodeMeta implements NodeMeta {
-    @Getter
     private final String _name;

-    public TestNodeMeta(String name) {_name = name;}
+    public TestNodeMeta(String name) {
+        _name = name;
+    }
+
+    @Override
+    public String getName() {
+        return _name;
+    }

     abstract public NodeMeta withName(String name);
 }

@@ -1,9 +1,6 @@
 package com.usatiuk.kleppmanntree;

-import lombok.Getter;
-
 public class TestNodeMetaFile extends TestNodeMeta {
-    @Getter
     private final long _inode;

     public TestNodeMetaFile(String name, long inode) {
@@ -11,6 +8,10 @@ public class TestNodeMetaFile extends TestNodeMeta {
         _inode = inode;
     }

+    public long getInode() {
+        return _inode;
+    }
+
     @Override
     public NodeMeta withName(String name) {
         return new TestNodeMetaFile(name, _inode);

@@ -1,52 +0,0 @@
-package com.usatiuk.kleppmanntree;
-
-public class TestNodeWrapper implements TreeNodeWrapper<Long, Long, TestNodeMeta, Long> {
-    private final TreeNode<Long, Long, TestNodeMeta, Long> _backingNode;
-
-    public TestNodeWrapper(TreeNode<Long, Long, TestNodeMeta, Long> backingNode) {_backingNode = backingNode;}
-
-    @Override
-    public void rLock() {
-
-    }
-
-    @Override
-    public void rUnlock() {
-
-    }
-
-    @Override
-    public void rwLock() {
-
-    }
-
-    @Override
-    public void rwUnlock() {
-
-    }
-
-    @Override
-    public void freeze() {
-
-    }
-
-    @Override
-    public void unfreeze() {
-
-    }
-
-    @Override
-    public void notifyRef(Long id) {
-
-    }
-
-    @Override
-    public void notifyRmRef(Long id) {
-
-    }
-
-    @Override
-    public TreeNode<Long, Long, TestNodeMeta, Long> getNode() {
-        return _backingNode;
-    }
-}
@@ -6,7 +6,9 @@ import java.util.List;
 public class TestPeerInterface implements PeerInterface<Long> {
     private final long selfId;

-    public TestPeerInterface(long selfId) {this.selfId = selfId;}
+    public TestPeerInterface(long selfId) {
+        this.selfId = selfId;
+    }

     @Override
     public Long getSelfId() {

@@ -3,17 +3,17 @@ package com.usatiuk.kleppmanntree;
 import java.util.HashMap;
 import java.util.Map;

-public class TestStorageInterface implements StorageInterface<Long, Long, TestNodeMeta, Long, TestNodeWrapper> {
+public class TestStorageInterface implements StorageInterface<Long, Long, TestNodeMeta, Long> {
     private final long _peerId;
-    private final Map<Long, TreeNode<Long, Long, TestNodeMeta, Long>> _nodes = new HashMap<>();
+    private final Map<Long, TestTreeNode> _nodes = new HashMap<>();
     private final TestLog _log = new TestLog();
     private final TestPeerLog _peerLog = new TestPeerLog();
     private long _curId = 1;

     public TestStorageInterface(long peerId) {
         _peerId = peerId;
-        _nodes.put(getRootId(), new TreeNode<>(getRootId(), null, null));
-        _nodes.put(getTrashId(), new TreeNode<>(getTrashId(), null, null));
+        _nodes.put(getRootId(), new TestTreeNode(getRootId(), null, null));
+        _nodes.put(getTrashId(), new TestTreeNode(getTrashId(), null, null));
     }

     @Override
@@ -32,18 +32,18 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
     }

     @Override
-    public TestNodeWrapper getById(Long id) {
-        var node = _nodes.get(id);
-        return node == null ? null : new TestNodeWrapper(node);
+    public TestTreeNode getById(Long id) {
+        return _nodes.get(id);
     }

     @Override
-    public TestNodeWrapper createNewNode(TreeNode<Long, Long, TestNodeMeta, Long> node) {
-        if (!_nodes.containsKey(node.getId())) {
-            _nodes.put(node.getId(), node);
-            return new TestNodeWrapper(node);
-        }
-        throw new IllegalStateException("Node with id " + node.getId() + " already exists");
+    public TestTreeNode createNewNode(Long key, Long parent, TestNodeMeta meta) {
+        return new TestTreeNode(key, parent, meta);
+    }
+
+    @Override
+    public void putNode(TreeNode<Long, Long, TestNodeMeta, Long> node) {
+        _nodes.put(node.key(), (TestTreeNode) node);
     }

     @Override
@@ -53,7 +53,6 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
         _nodes.remove(id);
     }

-
     @Override
     public LogInterface<Long, Long, TestNodeMeta, Long> getLog() {
         return _log;
@@ -64,29 +63,4 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNo
     public PeerTimestampLogInterface<Long, Long> getPeerTimestampLog() {
         return _peerLog;
     }
-
-    @Override
-    public void rLock() {
-
-    }
-
-    @Override
-    public void rUnlock() {
-
-    }
-
-    @Override
-    public void rwLock() {
-
-    }
-
-    @Override
-    public void rwUnlock() {
-
-    }
-
-    @Override
-    public void assertRwLock() {
-
-    }
 }

@@ -0,0 +1,33 @@
+package com.usatiuk.kleppmanntree;
+
+import org.pcollections.HashTreePMap;
+import org.pcollections.PMap;
+
+public record TestTreeNode(Long key, Long parent, OpMove<Long, Long, TestNodeMeta, Long> lastEffectiveOp,
+                           TestNodeMeta meta,
+                           PMap<String, Long> children) implements TreeNode<Long, Long, TestNodeMeta, Long> {
+
+    public TestTreeNode(Long id, Long parent, TestNodeMeta meta) {
+        this(id, parent, null, meta, HashTreePMap.empty());
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withParent(Long parent) {
+        return new TestTreeNode(key, parent, lastEffectiveOp, meta, children);
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withLastEffectiveOp(OpMove<Long, Long, TestNodeMeta, Long> lastEffectiveOp) {
+        return new TestTreeNode(key, parent, lastEffectiveOp, meta, children);
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withMeta(TestNodeMeta meta) {
+        return new TestTreeNode(key, parent, lastEffectiveOp, meta, children);
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withChildren(PMap<String, Long> children) {
+        return new TestTreeNode(key, parent, lastEffectiveOp, meta, children);
+    }
+}
112
dhfs-parent/objects/pom.xml
Normal file
@@ -0,0 +1,112 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.usatiuk.dhfs</groupId>
        <artifactId>parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <artifactId>objects</artifactId>

    <properties>
        <maven.compiler.source>21</maven.compiler.source>
        <maven.compiler.target>21</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <dependencies>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
        <dependency>
            <groupId>net.openhft</groupId>
            <artifactId>zero-allocation-hashing</artifactId>
        </dependency>
        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter-engine</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
        </dependency>
        <dependency>
            <groupId>org.jboss.slf4j</groupId>
            <artifactId>slf4j-jboss-logmanager</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>utils</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>supportlib</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5-mockito</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.lmdbjava</groupId>
            <artifactId>lmdbjava</artifactId>
            <version>0.9.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
        </dependency>
        <dependency>
            <groupId>org.pcollections</groupId>
            <artifactId>pcollections</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <configuration>
                    <forkCount>1C</forkCount>
                    <reuseForks>false</reuseForks>
                    <parallel>classes</parallel>
                </configuration>
            </plugin>
            <plugin>
                <groupId>${quarkus.platform.group-id}</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
                <version>${quarkus.platform.version}</version>
                <extensions>true</extensions>
                <executions>
                    <execution>
                        <id>quarkus-plugin</id>
                        <goals>
                            <goal>build</goal>
                            <goal>generate-code</goal>
                            <goal>generate-code-tests</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>
@@ -0,0 +1,24 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Iterator;

public interface CloseableKvIterator<K extends Comparable<K>, V> extends Iterator<Pair<K, V>>, AutoCloseableNoThrow {
    K peekNextKey();

    void skip();

    K peekPrevKey();

    Pair<K, V> prev();

    boolean hasPrev();

    void skipPrev();

    default CloseableKvIterator<K, V> reversed() {
        return new ReversedKvIterator<>(this);
    }
}
@@ -0,0 +1,55 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.transaction.LockingStrategy;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;

import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Iterator;
import java.util.Optional;

@ApplicationScoped
public class CurrentTransaction implements Transaction {
    @Inject
    TransactionManager transactionManager;

    @Override
    public void onCommit(Runnable runnable) {
        transactionManager.current().onCommit(runnable);
    }

    @Override
    public void onFlush(Runnable runnable) {
        transactionManager.current().onFlush(runnable);
    }

    @Override
    public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy) {
        return transactionManager.current().get(type, key, strategy);
    }

    @Override
    public void delete(JObjectKey key) {
        transactionManager.current().delete(key);
    }

    @Nonnull
    @Override
    public Collection<JObjectKey> findAllObjects() {
        return transactionManager.current().findAllObjects();
    }

    @Override
    public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
        return transactionManager.current().getIterator(start, key);
    }

    @Override
    public <T extends JData> void put(JData obj) {
        transactionManager.current().put(obj);
    }
}
@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects;

import java.util.Optional;

public record Data<V>(V value) implements MaybeTombstone<V> {
    @Override
    public Optional<V> opt() {
        return Optional.of(value);
    }
}
@@ -0,0 +1,8 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;

@FunctionalInterface
public interface IterProdFn<K extends Comparable<K>, V> {
    CloseableKvIterator<K, V> get(IteratorStart start, K key);
}
@@ -0,0 +1,16 @@
package com.usatiuk.dhfs.objects;

import java.io.Serializable;

// TODO: This could maybe be moved to a separate module?
// The base class for JObject data.
// Only one instance of this "exists" per key; the instance in the manager is canonical.
// When committing a transaction, the instance is checked against it; if it isn't the same, a race occurred.
// It is immutable; its version is filled in by the allocator from the AllocVersionProvider.
public interface JData extends Serializable {
    JObjectKey key();

    default int estimateSize() {
        return 100;
    }
}
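
As a usage sketch (not part of this change): since JData implementations are meant to be immutable and Serializable, a Java record is a natural fit. The ExampleData name and payload field below are hypothetical.

// Hypothetical JData implementation, for illustration only.
public record ExampleData(JObjectKey key, String payload) implements JData {
    @Override
    public int estimateSize() {
        // Rough size estimate: fixed overhead plus two bytes per UTF-16 char.
        return 64 + payload.length() * 2;
    }
}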
@@ -0,0 +1,8 @@
package com.usatiuk.dhfs.objects;

import jakarta.annotation.Nonnull;

import java.io.Serializable;

public record JDataVersionedWrapper(@Nonnull JData data, long version) implements Serializable {
}
@@ -0,0 +1,44 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;

import java.io.Serializable;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public record JObjectKey(String name) implements Serializable, Comparable<JObjectKey> {
    public static JObjectKey of(String name) {
        return new JObjectKey(name);
    }

    @Override
    public int compareTo(JObjectKey o) {
        return name.compareTo(o.name);
    }

    @Override
    public String toString() {
        return name;
    }

    public byte[] bytes() {
        return name.getBytes(StandardCharsets.UTF_8);
    }

    public ByteBuffer toByteBuffer() {
        var heapBb = StandardCharsets.UTF_8.encode(name);
        if (heapBb.isDirect()) return heapBb;
        var directBb = UninitializedByteBuffer.allocateUninitialized(heapBb.remaining());
        directBb.put(heapBb);
        directBb.flip();
        return directBb;
    }

    public static JObjectKey fromBytes(byte[] bytes) {
        return new JObjectKey(new String(bytes, StandardCharsets.UTF_8));
    }

    public static JObjectKey fromByteBuffer(ByteBuffer buff) {
        return new JObjectKey(StandardCharsets.UTF_8.decode(buff).toString());
    }
}
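
A quick usage sketch (assumed, not from the diff): both encodings are UTF-8, so keys round-trip losslessly through either path, and toByteBuffer only copies into a direct buffer when the charset encoder hands back a heap one.

// Hypothetical round-trip check, for illustration.
var key = JObjectKey.of("files/some-object");
assert JObjectKey.fromBytes(key.bytes()).equals(key);
assert JObjectKey.fromByteBuffer(key.toByteBuffer()).equals(key);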
@@ -0,0 +1,239 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.snapshot.SnapshotManager;
import com.usatiuk.dhfs.objects.transaction.*;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.enterprise.inject.Instance;
import jakarta.inject.Inject;

import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;

// Manages all access to com.usatiuk.dhfs.objects.JData objects.
// In particular, it serves as the source of truth for what is committed to the backing storage.
// All data goes through it; it is responsible for transaction atomicity.
// TODO: persistent tx id
@ApplicationScoped
public class JObjectManager {
    private final List<PreCommitTxHook> _preCommitTxHooks;
    private boolean _ready = false;
    @Inject
    SnapshotManager snapshotManager;
    @Inject
    TransactionFactory transactionFactory;
    @Inject
    LockManager lockManager;

    private void verifyReady() {
        if (!_ready) throw new IllegalStateException("Wrong service order!");
    }

    void init(@Observes @Priority(200) StartupEvent event) {
        _ready = true;
    }

    JObjectManager(Instance<PreCommitTxHook> preCommitTxHooks) {
        _preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList();
    }

    public TransactionPrivate createTransaction() {
        verifyReady();
        var tx = transactionFactory.createTransaction();
        Log.tracev("Created transaction with snapshotId={0}", tx.snapshot().id());
        return tx;
    }

    public TransactionHandle commit(TransactionPrivate tx) {
        verifyReady();
        var writes = new LinkedHashMap<JObjectKey, TxRecord.TxObjectRecord<?>>();
        var dependenciesLocked = new LinkedHashMap<JObjectKey, Optional<JDataVersionedWrapper>>();
        Map<JObjectKey, TransactionObject<?>> readSet;
        var toUnlock = new ArrayList<AutoCloseableNoThrow>();

        Consumer<JObjectKey> addDependency =
                key -> {
                    dependenciesLocked.computeIfAbsent(key, k -> {
                        var lock = lockManager.lockObject(k);
                        toUnlock.add(lock);
                        return snapshotManager.readObjectDirect(k);
                    });
                };

        // For existing objects:
        // Check that their version is not higher than the version of the transaction being committed
        // TODO: check deletions, inserts
        try {
            try {
                Function<JObjectKey, JData> getCurrent =
                        key -> switch (writes.get(key)) {
                            case TxRecord.TxObjectRecordWrite<?> write -> write.data();
                            case TxRecord.TxObjectRecordDeleted deleted -> null;
                            case null -> tx.readSource().get(JData.class, key).orElse(null);
                            default -> {
                                throw new TxCommitException("Unexpected value: " + writes.get(key));
                            }
                        };

                boolean somethingChanged;
                do {
                    somethingChanged = false;
                    Map<JObjectKey, TxRecord.TxObjectRecord<?>> currentIteration = new HashMap<>();
                    for (var hook : _preCommitTxHooks) {
                        for (var n : tx.drainNewWrites())
                            currentIteration.put(n.key(), n);
                        Log.trace("Commit iteration with " + currentIteration.size() + " records for hook " + hook.getClass());

                        for (var entry : currentIteration.entrySet()) {
                            somethingChanged = true;
                            Log.trace("Running pre-commit hook " + hook.getClass() + " for " + entry.getKey());
                            var oldObj = getCurrent.apply(entry.getKey());
                            switch (entry.getValue()) {
                                case TxRecord.TxObjectRecordWrite<?> write -> {
                                    if (oldObj == null) {
                                        hook.onCreate(write.key(), write.data());
                                    } else {
                                        hook.onChange(write.key(), oldObj, write.data());
                                    }
                                }
                                case TxRecord.TxObjectRecordDeleted deleted -> {
                                    hook.onDelete(deleted.key(), oldObj);
                                }
                                default -> throw new TxCommitException("Unexpected value: " + entry);
                            }
                        }
                    }
                    writes.putAll(currentIteration);
                } while (somethingChanged);

                if (writes.isEmpty()) {
                    Log.trace("Committing transaction - no changes");
                    return new TransactionHandle() {
                        @Override
                        public void onFlush(Runnable runnable) {
                            runnable.run();
                        }
                    };
                }

            } finally {
                readSet = tx.reads();

                Stream.concat(readSet.keySet().stream(), writes.keySet().stream())
                        .sorted(Comparator.comparing(JObjectKey::toString))
                        .forEach(addDependency);

                for (var read : readSet.entrySet()) {
                    if (read.getValue() instanceof TransactionObjectLocked<?> locked) {
                        toUnlock.add(locked.lock());
                    }
                }
            }

            Log.trace("Committing transaction start");
            var snapshotId = tx.snapshot().id();

            for (var read : readSet.entrySet()) {
                var dep = dependenciesLocked.get(read.getKey());

                if (dep.isEmpty() != read.getValue().data().isEmpty()) {
                    Log.trace("Checking read dependency " + read.getKey() + " - not found");
                    throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
                }

                if (dep.isEmpty()) {
                    // TODO: Every write gets a dependency due to hooks
                    continue;
//                    assert false;
//                    throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
                }

                if (dep.get().version() > snapshotId) {
                    Log.trace("Checking dependency " + read.getKey() + " - newer than snapshot");
                    throw new TxCommitException("Serialization hazard: " + dep.get().version() + " vs " + snapshotId);
                }

                Log.trace("Checking dependency " + read.getKey() + " - ok with read");
            }

            var addFlushCallback = snapshotManager.commitTx(
                    writes.values().stream()
                            .filter(r -> {
                                if (r instanceof TxRecord.TxObjectRecordWrite<?>(JData data)) {
                                    var dep = dependenciesLocked.get(data.key());
                                    if (dep.isPresent() && dep.get().version() > snapshotId) {
                                        Log.trace("Skipping write " + data.key() + " - dependency " + dep.get().version() + " vs " + snapshotId);
                                        return false;
                                    }
                                }
                                return true;
                            }).toList());

            for (var callback : tx.getOnCommit()) {
                callback.run();
            }

            for (var callback : tx.getOnFlush()) {
                addFlushCallback.accept(callback);
            }

            return new TransactionHandle() {
                @Override
                public void onFlush(Runnable runnable) {
                    addFlushCallback.accept(runnable);
                }
            };
        } catch (Throwable t) {
            Log.trace("Error when committing transaction", t);
            throw new TxCommitException(t.getMessage(), t);
        } finally {
            for (var unlock : toUnlock) {
                unlock.close();
            }
            tx.close();
        }
    }

    public void rollback(TransactionPrivate tx) {
        verifyReady();
        tx.reads().forEach((key, value) -> {
            if (value instanceof TransactionObjectLocked<?> locked) {
                locked.lock().close();
            }
        });
        tx.close();
    }

//    private class TransactionObjectSourceImpl implements TransactionObjectSource {
//        private final long _txId;
//
//        private TransactionObjectSourceImpl(long txId) {
//            _txId = txId;
//        }
//
//        @Override
//        public <T extends JData> TransactionObject<T> get(Class<T> type, JObjectKey key) {
//            var got = getObj(type, key);
//            if (got.data().isPresent() && got.data().get().version() > _txId) {
//                throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
//            }
//            return got;
//        }
//
//        @Override
//        public <T extends JData> TransactionObject<T> getWriteLocked(Class<T> type, JObjectKey key) {
//            var got = getObjLock(type, key);
//            if (got.data().isPresent() && got.data().get().version() > _txId) {
//                got.lock().close();
//                throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
//            }
//            return got;
//        }
//    }
}
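
The dependency check above boils down to one rule per read: the committed version a transaction observed must not be newer than its snapshot. A minimal sketch of that rule, with a hypothetical helper name (readStillValid is not part of the diff):

// Illustrative only: a read is valid if the object it saw was not
// rewritten after the transaction's snapshot was taken.
static boolean readStillValid(long snapshotId, JDataVersionedWrapper committed) {
    return committed == null || committed.version() <= snapshotId;
}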
@@ -0,0 +1,21 @@
package com.usatiuk.dhfs.objects;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.utils.SerializationHelper;
import jakarta.enterprise.context.ApplicationScoped;

import java.io.Serializable;

@ApplicationScoped
public class JavaDataSerializer implements ObjectSerializer<JDataVersionedWrapper> {
    @Override
    public ByteString serialize(JDataVersionedWrapper obj) {
        return SerializationHelper.serialize((Serializable) obj);
    }

    @Override
    public JDataVersionedWrapper deserialize(ByteString data) {
        return SerializationHelper.deserialize(data.toByteArray());
    }
}
@@ -0,0 +1,129 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;

import java.util.NoSuchElementException;
import java.util.function.Function;

public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
    private final CloseableKvIterator<K, V> _backing;
    private final Function<K, Boolean> _filter;
    private K _next;

    public KeyPredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<K, Boolean> filter) {
        _goingForward = true;
        _backing = backing;
        _filter = filter;
        fillNext();

        boolean shouldGoBack = false;
        if (start == IteratorStart.LE) {
            if (_next == null || _next.compareTo(startKey) > 0) {
                shouldGoBack = true;
            }
        } else if (start == IteratorStart.LT) {
            if (_next == null || _next.compareTo(startKey) >= 0) {
                shouldGoBack = true;
            }
        }

        if (shouldGoBack && _backing.hasPrev()) {
            _goingForward = false;
            _next = null;
            fillNext();
            if (_next != null)
                _backing.skipPrev();
            _goingForward = true;
//            _backing.skip();
            fillNext();
        }

        switch (start) {
            case LT -> {
//                assert _next == null || _next.getKey().compareTo(startKey) < 0;
            }
            case LE -> {
//                assert _next == null || _next.getKey().compareTo(startKey) <= 0;
            }
            case GT -> {
                assert _next == null || _next.compareTo(startKey) > 0;
            }
            case GE -> {
                assert _next == null || _next.compareTo(startKey) >= 0;
            }
        }
    }

    private void fillNext() {
        while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) {
            var next = _goingForward ? _backing.peekNextKey() : _backing.peekPrevKey();
            if (!_filter.apply(next)) {
                if (_goingForward)
                    _backing.skip();
                else
                    _backing.skipPrev();
                continue;
            }
            _next = next;
        }
    }

    @Override
    protected void reverse() {
        _goingForward = !_goingForward;
        _next = null;

        fillNext();
    }

    @Override
    protected K peekImpl() {
        if (_next == null)
            throw new NoSuchElementException();
        return _next;
    }

    @Override
    protected void skipImpl() {
        if (_next == null)
            throw new NoSuchElementException();
        _next = null;
        if (_goingForward)
            _backing.skip();
        else
            _backing.skipPrev();
        fillNext();
    }

    @Override
    protected boolean hasImpl() {
        return _next != null;
    }

    @Override
    protected Pair<K, V> nextImpl() {
        if (_next == null)
            throw new NoSuchElementException("No more elements");
        var retKey = _next;
        _next = null;
        var got = _goingForward ? _backing.next() : _backing.prev();
        assert got.getKey().equals(retKey);
        fillNext();
        return got;
    }

    @Override
    public void close() {
        _backing.close();
    }

    @Override
    public String toString() {
        return "KeyPredicateKvIterator{" +
                "_backing=" + _backing +
                ", _next=" + _next +
                '}';
    }
}
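
A usage sketch, under the assumption that AutoCloseableNoThrow permits try-with-resources; the map contents are made up for illustration:

NavigableMap<String, Integer> map = new TreeMap<>(Map.of("a/1", 1, "a/2", 2, "b/1", 3));
try (var it = new KeyPredicateKvIterator<>(
        new NavigableMapKvIterator<>(map, IteratorStart.GE, "a"),
        IteratorStart.GE, "a", k -> k.startsWith("a/"))) {
    while (it.hasNext())
        System.out.println(it.next()); // (a/1,1), then (a/2,2); b/1 is filtered out
}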
@@ -0,0 +1,14 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import com.usatiuk.dhfs.utils.DataLocker;
import jakarta.enterprise.context.ApplicationScoped;

@ApplicationScoped
public class LockManager {
    private final DataLocker _objLocker = new DataLocker();

    public AutoCloseableNoThrow lockObject(JObjectKey key) {
        return _objLocker.lock(key);
    }
}
@@ -0,0 +1,69 @@
package com.usatiuk.dhfs.objects;

import org.apache.commons.lang3.tuple.Pair;

import java.util.function.Function;

public class MappingKvIterator<K extends Comparable<K>, V, V_T> implements CloseableKvIterator<K, V_T> {
    private final CloseableKvIterator<K, V> _backing;
    private final Function<V, V_T> _transformer;

    public MappingKvIterator(CloseableKvIterator<K, V> backing, Function<V, V_T> transformer) {
        _backing = backing;
        _transformer = transformer;
    }

    @Override
    public K peekNextKey() {
        return _backing.peekNextKey();
    }

    @Override
    public void skip() {
        _backing.skip();
    }

    @Override
    public void close() {
        _backing.close();
    }

    @Override
    public boolean hasNext() {
        return _backing.hasNext();
    }

    @Override
    public K peekPrevKey() {
        return _backing.peekPrevKey();
    }

    @Override
    public Pair<K, V_T> prev() {
        var got = _backing.prev();
        return Pair.of(got.getKey(), _transformer.apply(got.getValue()));
    }

    @Override
    public boolean hasPrev() {
        return _backing.hasPrev();
    }

    @Override
    public void skipPrev() {
        _backing.skipPrev();
    }

    @Override
    public Pair<K, V_T> next() {
        var got = _backing.next();
        return Pair.of(got.getKey(), _transformer.apply(got.getValue()));
    }

    @Override
    public String toString() {
        return "MappingKvIterator{" +
                "_backing=" + _backing +
                '}';
    }
}
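
A small usage sketch (illustrative values): the transformer is applied lazily, per returned pair, in both directions of iteration.

NavigableMap<String, Integer> map = new TreeMap<>(Map.of("a", 1, "b", 2));
try (var it = new MappingKvIterator<>(
        new NavigableMapKvIterator<>(map, IteratorStart.GE, "a"),
        v -> "value=" + v)) {
    while (it.hasNext())
        System.out.println(it.next()); // (a,value=1), then (b,value=2)
}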
@@ -0,0 +1,7 @@
package com.usatiuk.dhfs.objects;

import java.util.Optional;

public interface MaybeTombstone<T> {
    Optional<T> opt();
}
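
A brief sketch of how the two implementations added in this change (Data and Tombstone) collapse into an Optional:

MaybeTombstone<String> present = new Data<>("hello");
MaybeTombstone<String> deleted = new Tombstone<>();
assert present.opt().isPresent();
assert deleted.opt().isEmpty();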
@@ -0,0 +1,192 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;
import java.util.stream.Collectors;

public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
    private final Map<CloseableKvIterator<K, V>, Integer> _iterators;
    private final NavigableMap<K, CloseableKvIterator<K, V>> _sortedIterators = new TreeMap<>();
    private final String _name;

    public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
        _goingForward = true;
        _name = name;

        IteratorStart initialStartType = startType;
        K initialStartKey = startKey;
        boolean fail = false;
        if (startType == IteratorStart.LT || startType == IteratorStart.LE) {
            // Starting at the greatest key less than (LT) / less than or equal to (LE) the start key:
            // each iterator has given us its "greatest LT/LE key",
            // and we need to pick the greatest of those to start with.
            // But if some of them don't have a lesser key, we need to pick the smallest of those.
            var initialIterators = iterators.stream().map(p -> p.get(initialStartType, initialStartKey)).toList();
            try {
                IteratorStart finalStartType = startType;
                var found = initialIterators.stream()
                        .filter(CloseableKvIterator::hasNext)
                        .map((i) -> {
                            var peeked = i.peekNextKey();
//                            Log.warnv("peeked: {0}, from {1}", peeked, i.getClass());
                            return peeked;
                        }).distinct().collect(Collectors.partitioningBy(e -> finalStartType == IteratorStart.LE ? e.compareTo(initialStartKey) <= 0 : e.compareTo(initialStartKey) < 0));
                K initialMaxValue;
                if (!found.get(true).isEmpty())
                    initialMaxValue = found.get(true).stream().max(Comparator.naturalOrder()).orElse(null);
                else
                    initialMaxValue = found.get(false).stream().min(Comparator.naturalOrder()).orElse(null);
                if (initialMaxValue == null) {
                    fail = true;
                }
                startKey = initialMaxValue;
                startType = IteratorStart.GE;
            } finally {
                initialIterators.forEach(CloseableKvIterator::close);
            }
        }

        if (fail) {
            _iterators = Map.of();
            return;
        }

        int counter = 0;
        var iteratorsTmp = new HashMap<CloseableKvIterator<K, V>, Integer>();
        for (var iteratorFn : iterators) {
            var iterator = iteratorFn.get(startType, startKey);
            iteratorsTmp.put(iterator, counter++);
        }
        _iterators = Map.copyOf(iteratorsTmp);

        for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
            advanceIterator(iterator);
        }

        Log.tracev("{0} Created: {1}", _name, _sortedIterators);
        switch (initialStartType) {
//            case LT -> {
//                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
//            }
//            case LE -> {
//                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
//            }
            case GT -> {
                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) > 0;
            }
            case GE -> {
                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) >= 0;
            }
        }
    }

    @SafeVarargs
    public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
        this(name, startType, startKey, List.of(iterators));
    }

    private void advanceIterator(CloseableKvIterator<K, V> iterator) {
        if (!iterator.hasNext()) {
            return;
        }

        K key = iterator.peekNextKey();
        Log.tracev("{0} Advance peeked: {1}-{2}", _name, iterator, key);
        if (!_sortedIterators.containsKey(key)) {
            _sortedIterators.put(key, iterator);
            return;
        }

        // Expects that a reversed iterator returns itself when reversed again
        var oursPrio = _iterators.get(_goingForward ? iterator : iterator.reversed());
        var them = _sortedIterators.get(key);
        var theirsPrio = _iterators.get(_goingForward ? them : them.reversed());
        if (oursPrio < theirsPrio) {
            _sortedIterators.put(key, iterator);
            advanceIterator(them);
        } else {
            Log.tracev("{0} Skipped: {1}", _name, iterator.peekNextKey());
            iterator.skip();
            advanceIterator(iterator);
        }
    }

    @Override
    protected void reverse() {
        var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
        Log.tracev("{0} Reversing from {1}", _name, cur);
        _goingForward = !_goingForward;
        _sortedIterators.clear();
        for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
            // _goingForward inverted already
            advanceIterator(!_goingForward ? iterator.reversed() : iterator);
        }
        if (_sortedIterators.isEmpty() || cur == null) {
            return;
        }
        // Advance to the expected key, as we might have brought back some iterators
        // that were at their ends
        while (!_sortedIterators.isEmpty()
                && ((_goingForward && peekImpl().compareTo(cur.getKey()) <= 0)
                || (!_goingForward && peekImpl().compareTo(cur.getKey()) >= 0))) {
            skipImpl();
        }
        Log.tracev("{0} Reversed to {1}", _name, _sortedIterators);
    }

    @Override
    protected K peekImpl() {
        if (_sortedIterators.isEmpty())
            throw new NoSuchElementException();
        return _goingForward ? _sortedIterators.firstKey() : _sortedIterators.lastKey();
    }

    @Override
    protected void skipImpl() {
        var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
        if (cur == null) {
            throw new NoSuchElementException();
        }
        cur.getValue().skip();
        advanceIterator(cur.getValue());
        Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators);
    }

    @Override
    protected boolean hasImpl() {
        return !_sortedIterators.isEmpty();
    }

    @Override
    protected Pair<K, V> nextImpl() {
        var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
        if (cur == null) {
            throw new NoSuchElementException();
        }
        var curVal = cur.getValue().next();
        advanceIterator(cur.getValue());
//        Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, cur.getValue(), curVal, _sortedIterators.keySet());
        return curVal;
    }

    @Override
    public void close() {
        for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
            iterator.close();
        }
    }

    @Override
    public String toString() {
        return "MergingKvIterator{" +
                "_name='" + _name + '\'' +
                ", _sortedIterators=" + _sortedIterators.keySet() +
                ", _iterators=" + _iterators +
                '}';
    }
}
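
A usage sketch of the priority rule in advanceIterator: when several sources hold the same key, the iterator that appears earlier in the argument list wins. The maps below are made up for illustration.

NavigableMap<String, Integer> primary = new TreeMap<>(Map.of("a", 1));
NavigableMap<String, Integer> fallback = new TreeMap<>(Map.of("a", 100, "b", 2));
try (var merged = new MergingKvIterator<String, Integer>("example", IteratorStart.GE, "",
        (start, key) -> new NavigableMapKvIterator<>(primary, start, key),
        (start, key) -> new NavigableMapKvIterator<>(fallback, start, key))) {
    while (merged.hasNext())
        System.out.println(merged.next()); // (a,1) from primary, then (b,2) from fallback
}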
@@ -0,0 +1,104 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;

public class NavigableMapKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
    private final NavigableMap<K, V> _map;
    private Iterator<Map.Entry<K, V>> _iterator;
    private Map.Entry<K, V> _next;

    public NavigableMapKvIterator(NavigableMap<K, V> map, IteratorStart start, K key) {
        _map = map;
        SortedMap<K, V> _view;
        _goingForward = true;
        switch (start) {
            case GE -> _view = map.tailMap(key, true);
            case GT -> _view = map.tailMap(key, false);
            case LE -> {
                var floorKey = map.floorKey(key);
                if (floorKey == null) _view = _map;
                else _view = map.tailMap(floorKey, true);
            }
            case LT -> {
                var lowerKey = map.lowerKey(key);
                if (lowerKey == null) _view = _map;
                else _view = map.tailMap(lowerKey, true);
            }
            default -> throw new IllegalArgumentException("Unknown start type");
        }
        _iterator = _view.entrySet().iterator();
        fillNext();
    }

    @Override
    protected void reverse() {
        var oldNext = _next;
        _next = null;
        if (_goingForward) {
            _iterator
                    = oldNext == null
                    ? _map.descendingMap().entrySet().iterator()
                    : _map.headMap(oldNext.getKey(), false).descendingMap().entrySet().iterator();
        } else {
            _iterator
                    = oldNext == null
                    ? _map.entrySet().iterator()
                    : _map.tailMap(oldNext.getKey(), false).entrySet().iterator();
        }
        _goingForward = !_goingForward;
        fillNext();
    }

    private void fillNext() {
        while (_iterator.hasNext() && _next == null) {
            _next = _iterator.next();
        }
    }

    @Override
    protected K peekImpl() {
        if (_next == null) {
            throw new NoSuchElementException();
        }
        return _next.getKey();
    }

    @Override
    protected void skipImpl() {
        if (_next == null) {
            throw new NoSuchElementException();
        }
        _next = null;
        fillNext();
    }

    @Override
    protected boolean hasImpl() {
        return _next != null;
    }

    @Override
    protected Pair<K, V> nextImpl() {
        if (_next == null) {
            throw new NoSuchElementException("No more elements");
        }
        var ret = _next;
        _next = null;
        fillNext();
        return Pair.of(ret);
    }

    @Override
    public void close() {
    }

    @Override
    public String toString() {
        return "NavigableMapKvIterator{" +
                ", _next=" + _next +
                '}';
    }
}
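
A usage sketch (illustrative data): LE positions the iterator at the greatest key less than or equal to the requested one, and the ReversibleKvIterator machinery lets the same iterator be walked back with prev().

NavigableMap<Integer, String> map = new TreeMap<>(Map.of(1, "one", 3, "three"));
try (var it = new NavigableMapKvIterator<>(map, IteratorStart.LE, 2)) {
    System.out.println(it.next()); // (1,one)
    System.out.println(it.next()); // (3,three)
    System.out.println(it.prev()); // (3,three) again, after reversing direction
}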
@@ -0,0 +1,9 @@
package com.usatiuk.dhfs.objects;

import com.google.protobuf.ByteString;

public interface ObjectSerializer<T> {
    ByteString serialize(T obj);

    T deserialize(ByteString data);
}
@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.objects;

public record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry {
}
@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.objects;

public record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry {
}
@@ -0,0 +1,5 @@
package com.usatiuk.dhfs.objects;

public interface PendingWriteEntry {
    long bundleId();
}
@@ -0,0 +1,16 @@
package com.usatiuk.dhfs.objects;

public interface PreCommitTxHook {
    default void onChange(JObjectKey key, JData old, JData cur) {
    }

    default void onCreate(JObjectKey key, JData cur) {
    }

    default void onDelete(JObjectKey key, JData cur) {
    }

    default int getPriority() {
        return 0;
    }
}
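
A hypothetical hook for illustration (not part of the diff); JObjectManager discovers hooks through CDI, so registering one as a bean should be enough, assuming the usual Quarkus setup seen elsewhere in this change:

@ApplicationScoped
public class LoggingTxHook implements PreCommitTxHook {
    @Override
    public void onCreate(JObjectKey key, JData cur) {
        Log.tracev("created {0}", key);
    }

    @Override
    public void onChange(JObjectKey key, JData old, JData cur) {
        Log.tracev("changed {0}", key);
    }

    @Override
    public void onDelete(JObjectKey key, JData cur) {
        Log.tracev("deleted {0}", key);
    }
}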
@@ -0,0 +1,129 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;

import java.util.NoSuchElementException;
import java.util.function.Function;

public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
    private final CloseableKvIterator<K, V> _backing;
    private final Function<V, V_T> _transformer;
    private Pair<K, V_T> _next;

    public PredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<V, V_T> transformer) {
        _goingForward = true;
        _backing = backing;
        _transformer = transformer;
        fillNext();

        boolean shouldGoBack = false;
        if (start == IteratorStart.LE) {
            if (_next == null || _next.getKey().compareTo(startKey) > 0) {
                shouldGoBack = true;
            }
        } else if (start == IteratorStart.LT) {
            if (_next == null || _next.getKey().compareTo(startKey) >= 0) {
                shouldGoBack = true;
            }
        }

        if (shouldGoBack && _backing.hasPrev()) {
            _goingForward = false;
            _next = null;
            _backing.skipPrev();
            fillNext();
            _goingForward = true;
            _backing.skip();
            fillNext();
        }

        switch (start) {
            case LT -> {
//                assert _next == null || _next.getKey().compareTo(startKey) < 0;
            }
            case LE -> {
//                assert _next == null || _next.getKey().compareTo(startKey) <= 0;
            }
            case GT -> {
                assert _next == null || _next.getKey().compareTo(startKey) > 0;
            }
            case GE -> {
                assert _next == null || _next.getKey().compareTo(startKey) >= 0;
            }
        }
    }

    private void fillNext() {
        while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) {
            var next = _goingForward ? _backing.next() : _backing.prev();
            var transformed = _transformer.apply(next.getValue());
            if (transformed == null)
                continue;
            _next = Pair.of(next.getKey(), transformed);
        }
    }

    @Override
    protected void reverse() {
        _goingForward = !_goingForward;
        boolean wasAtEnd = _next == null;

        if (_goingForward && !wasAtEnd)
            _backing.skip();
        else if (!_goingForward && !wasAtEnd)
            _backing.skipPrev();

        if (!wasAtEnd)
            Log.tracev("Skipped in reverse: {0}", _next);

        _next = null;

        fillNext();
    }

    @Override
    protected K peekImpl() {
        if (_next == null)
            throw new NoSuchElementException();
        return _next.getKey();
    }

    @Override
    protected void skipImpl() {
        if (_next == null)
            throw new NoSuchElementException();
        _next = null;
        fillNext();
    }

    @Override
    protected boolean hasImpl() {
        return _next != null;
    }

    @Override
    protected Pair<K, V_T> nextImpl() {
        if (_next == null)
            throw new NoSuchElementException("No more elements");
        var ret = _next;
        _next = null;
        fillNext();
        return ret;
    }

    @Override
    public void close() {
        _backing.close();
    }

    @Override
    public String toString() {
        return "PredicateKvIterator{" +
                "_backing=" + _backing +
                ", _next=" + _next +
                '}';
    }
}
@@ -0,0 +1,61 @@
package com.usatiuk.dhfs.objects;

import org.apache.commons.lang3.tuple.Pair;

public class ReversedKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
    private final CloseableKvIterator<K, V> _backing;

    public ReversedKvIterator(CloseableKvIterator<K, V> backing) {
        _backing = backing;
    }

    @Override
    public void close() {
        _backing.close();
    }

    @Override
    public boolean hasNext() {
        return _backing.hasPrev();
    }

    @Override
    public Pair<K, V> next() {
        return _backing.prev();
    }

    @Override
    public K peekNextKey() {
        return _backing.peekPrevKey();
    }

    @Override
    public void skip() {
        _backing.skipPrev();
    }

    @Override
    public K peekPrevKey() {
        return _backing.peekNextKey();
    }

    @Override
    public Pair<K, V> prev() {
        return _backing.next();
    }

    @Override
    public boolean hasPrev() {
        return _backing.hasNext();
    }

    @Override
    public void skipPrev() {
        _backing.skip();
    }

    @Override
    public CloseableKvIterator<K, V> reversed() {
        return _backing;
    }
}
@@ -0,0 +1,79 @@
package com.usatiuk.dhfs.objects;

import org.apache.commons.lang3.tuple.Pair;

public abstract class ReversibleKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
    protected boolean _goingForward;

    protected abstract void reverse();

    private void ensureForward() {
        if (!_goingForward) {
            reverse();
        }
    }

    private void ensureBackward() {
        if (_goingForward) {
            reverse();
        }
    }

    protected abstract K peekImpl();

    protected abstract void skipImpl();

    protected abstract boolean hasImpl();

    protected abstract Pair<K, V> nextImpl();

    @Override
    public K peekNextKey() {
        ensureForward();
        return peekImpl();
    }

    @Override
    public void skip() {
        ensureForward();
        skipImpl();
    }

    @Override
    public boolean hasNext() {
        ensureForward();
        return hasImpl();
    }

    @Override
    public Pair<K, V> next() {
        ensureForward();
        return nextImpl();
    }

    @Override
    public K peekPrevKey() {
        ensureBackward();
        return peekImpl();
    }

    @Override
    public Pair<K, V> prev() {
        ensureBackward();
        return nextImpl();
    }

    @Override
    public boolean hasPrev() {
        ensureBackward();
        return hasImpl();
    }

    @Override
    public void skipPrev() {
        ensureBackward();
        skipImpl();
    }
}
@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects;

import java.util.Optional;

public record Tombstone<V>() implements MaybeTombstone<V> {
    @Override
    public Optional<V> opt() {
        return Optional.empty();
    }
}
@@ -0,0 +1,84 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;

import java.util.List;

public class TombstoneMergingKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
    private final CloseableKvIterator<K, V> _backing;
    private final String _name;

    public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, MaybeTombstone<V>>> iterators) {
        _name = name;
        _backing = new PredicateKvIterator<>(
                new MergingKvIterator<>(name + "-merging", startType, startKey, iterators),
                startType, startKey,
                pair -> {
                    Log.tracev("{0} - Processing pair {1}", _name, pair);
                    if (pair instanceof Tombstone) {
                        return null;
                    }
                    return ((Data<V>) pair).value();
                });
    }

    @SafeVarargs
    public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, MaybeTombstone<V>>... iterators) {
        this(name, startType, startKey, List.of(iterators));
    }

    @Override
    public K peekNextKey() {
        return _backing.peekNextKey();
    }

    @Override
    public void skip() {
        _backing.skip();
    }

    @Override
    public K peekPrevKey() {
        return _backing.peekPrevKey();
    }

    @Override
    public Pair<K, V> prev() {
        return _backing.prev();
    }

    @Override
    public boolean hasPrev() {
        return _backing.hasPrev();
    }

    @Override
    public void skipPrev() {
        _backing.skipPrev();
    }

    @Override
    public void close() {
        _backing.close();
    }

    @Override
    public boolean hasNext() {
        return _backing.hasNext();
    }

    @Override
    public Pair<K, V> next() {
        return _backing.next();
    }

    @Override
    public String toString() {
        return "TombstoneMergingKvIterator{" +
                "_backing=" + _backing +
                ", _name='" + _name + '\'' +
                '}';
    }
}
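
A sketch of the layering this enables (illustrative maps): a tombstone in a higher-priority layer hides the value in a lower-priority one, and tombstones themselves never reach the caller.

NavigableMap<String, MaybeTombstone<Integer>> overlay = new TreeMap<>(
        Map.<String, MaybeTombstone<Integer>>of("a", new Tombstone<>(), "b", new Data<>(2)));
NavigableMap<String, MaybeTombstone<Integer>> base = new TreeMap<>(
        Map.<String, MaybeTombstone<Integer>>of("a", new Data<>(1)));
try (var it = new TombstoneMergingKvIterator<String, Integer>("example", IteratorStart.GE, "",
        (start, key) -> new NavigableMapKvIterator<>(overlay, start, key),
        (start, key) -> new NavigableMapKvIterator<>(base, start, key))) {
    while (it.hasNext())
        System.out.println(it.next()); // only (b,2); "a" is hidden by the tombstone
}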
@@ -0,0 +1,102 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.transaction.Transaction;
import com.usatiuk.dhfs.objects.transaction.TransactionHandle;
import com.usatiuk.dhfs.utils.VoidFn;
import io.quarkus.logging.Log;

import java.util.function.Supplier;

public interface TransactionManager {
    void begin();

    TransactionHandle commit();

    void rollback();

    default <T> T runTries(Supplier<T> supplier, int tries) {
        if (current() != null) {
            return supplier.get();
        }

        begin();
        T ret;
        try {
            ret = supplier.get();
        } catch (TxCommitException txCommitException) {
            rollback();
            if (tries == 0) {
                Log.error("Transaction commit failed", txCommitException);
                throw txCommitException;
            }
            return runTries(supplier, tries - 1);
        } catch (Throwable e) {
            rollback();
            throw e;
        }
        try {
            commit();
            return ret;
        } catch (TxCommitException txCommitException) {
            if (tries == 0) {
                Log.error("Transaction commit failed", txCommitException);
                throw txCommitException;
            }
            return runTries(supplier, tries - 1);
        }
    }

    default TransactionHandle runTries(VoidFn fn, int tries) {
        if (current() != null) {
            fn.apply();
            return new TransactionHandle() {
                @Override
                public void onFlush(Runnable runnable) {
                    current().onCommit(runnable);
                }
            };
        }

        begin();
        try {
            fn.apply();
        } catch (TxCommitException txCommitException) {
            rollback();
            if (tries == 0) {
                Log.error("Transaction commit failed", txCommitException);
                throw txCommitException;
            }
            return runTries(fn, tries - 1);
        } catch (Throwable e) {
            rollback();
            throw e;
        }
        try {
            return commit();
        } catch (TxCommitException txCommitException) {
            if (tries == 0) {
                Log.error("Transaction commit failed", txCommitException);
                throw txCommitException;
            }
            return runTries(fn, tries - 1);
        }
    }

    default TransactionHandle run(VoidFn fn) {
        return runTries(fn, 10);
    }

    default <T> T run(Supplier<T> supplier) {
        return runTries(supplier, 10);
    }

    default void executeTx(VoidFn fn) {
        run(fn);
    }

    default <T> T executeTx(Supplier<T> supplier) {
        return run(supplier);
    }

    Transaction current();
}
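
A usage sketch, assuming CDI injection of the manager and of Transaction (via CurrentTransaction) as set up elsewhere in this change; ExampleService is hypothetical. run() begins a transaction unless the calling thread already has one, retries on TxCommitException (up to 10 attempts through runTries), and commits on success.

@ApplicationScoped
public class ExampleService {
    @Inject
    TransactionManager txm;
    @Inject
    Transaction curTx; // delegates to the thread's active transaction

    public void deleteObject(JObjectKey key) {
        txm.run(() -> curTx.delete(key));
    }
}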
@@ -0,0 +1,67 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.transaction.Transaction;
import com.usatiuk.dhfs.objects.transaction.TransactionHandle;
import com.usatiuk.dhfs.objects.transaction.TransactionPrivate;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;

@ApplicationScoped
public class TransactionManagerImpl implements TransactionManager {
    private static final ThreadLocal<TransactionPrivate> _currentTransaction = new ThreadLocal<>();
    @Inject
    JObjectManager jObjectManager;

    @Override
    public void begin() {
        if (_currentTransaction.get() != null) {
            throw new IllegalStateException("Transaction already started");
        }

        Log.trace("Starting transaction");
        var tx = jObjectManager.createTransaction();
        _currentTransaction.set(tx);
    }

    @Override
    public TransactionHandle commit() {
        if (_currentTransaction.get() == null) {
            throw new IllegalStateException("No transaction started");
        }

        Log.trace("Committing transaction");
        try {
            return jObjectManager.commit(_currentTransaction.get());
        } catch (Throwable e) {
            Log.trace("Transaction commit failed", e);
            throw e;
        } finally {
            _currentTransaction.get().close();
            _currentTransaction.remove();
        }
    }

    @Override
    public void rollback() {
        if (_currentTransaction.get() == null) {
            throw new IllegalStateException("No transaction started");
        }

        try {
            jObjectManager.rollback(_currentTransaction.get());
        } catch (Throwable e) {
            Log.error("Transaction rollback failed", e);
            throw e;
        } finally {
            _currentTransaction.get().close();
            _currentTransaction.remove();
        }
    }

    @Override
    public Transaction current() {
        return _currentTransaction.get();
    }
}
@@ -0,0 +1,11 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.transaction.TransactionObject;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;

import java.util.Optional;

public record TransactionObjectLocked<T extends JData>
        (Optional<JDataVersionedWrapper> data, AutoCloseableNoThrow lock)
        implements TransactionObject<T> {
}
@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.transaction.TransactionObject;

import java.util.Optional;

public record TransactionObjectNoLock<T extends JData>
        (Optional<JDataVersionedWrapper> data)
        implements TransactionObject<T> {
}
@@ -0,0 +1,11 @@
package com.usatiuk.dhfs.objects;

public class TxCommitException extends RuntimeException {
    public TxCommitException(String message) {
        super(message);
    }

    public TxCommitException(String message, Throwable cause) {
        super(message, cause);
    }
}
@@ -0,0 +1,490 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.persistence.TxManifestObj;
import com.usatiuk.dhfs.objects.transaction.TxRecord;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.pcollections.PSortedMap;
import org.pcollections.TreePMap;

import javax.annotation.Nonnull;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

@ApplicationScoped
public class WritebackObjectPersistentStore {
    private final LinkedList<TxBundle> _pendingBundles = new LinkedList<>();

    private final AtomicReference<PSortedMap<JObjectKey, PendingWriteEntry>> _pendingWrites = new AtomicReference<>(TreePMap.empty());
    private final ReentrantReadWriteLock _pendingWritesVersionLock = new ReentrantReadWriteLock();
    private final LinkedHashMap<Long, TxBundle> _notFlushedBundles = new LinkedHashMap<>();

    private final Object _flushWaitSynchronizer = new Object();
    private final AtomicLong _lastWrittenTx = new AtomicLong(-1);
    private final AtomicLong _counter = new AtomicLong();
    private final AtomicLong _lastCommittedTx = new AtomicLong(-1);
    private final AtomicLong _waitedTotal = new AtomicLong(0);
    @Inject
    CachingObjectPersistentStore cachedStore;
    @ConfigProperty(name = "dhfs.objects.writeback.limit")
    long sizeLimit;
    private long currentSize = 0;
    private ExecutorService _writebackExecutor;
    private ExecutorService _statusExecutor;
    private volatile boolean _ready = false;

    void init(@Observes @Priority(110) StartupEvent event) {
        {
            BasicThreadFactory factory = new BasicThreadFactory.Builder()
                    .namingPattern("tx-writeback-%d")
                    .build();

            _writebackExecutor = Executors.newSingleThreadExecutor(factory);
            _writebackExecutor.submit(this::writeback);
        }

        _statusExecutor = Executors.newSingleThreadExecutor();
        _statusExecutor.submit(() -> {
            try {
                while (true) {
                    Thread.sleep(1000);
                    if (currentSize > 0)
                        Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB");
                }
            } catch (InterruptedException ignored) {
            }
        });
        _counter.set(cachedStore.getLastTxId());
        _lastCommittedTx.set(cachedStore.getLastTxId());
        _ready = true;
    }

    void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException {
        Log.info("Waiting for all transactions to drain");

        synchronized (_flushWaitSynchronizer) {
            _ready = false;
            while (currentSize > 0) {
                _flushWaitSynchronizer.wait();
            }
        }

        _writebackExecutor.shutdownNow();
        Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms");
    }

    private void verifyReady() {
        if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!");
    }

    private void writeback() {
        while (!Thread.interrupted()) {
            try {
                TxBundle bundle = new TxBundle(0);
                synchronized (_pendingBundles) {
                    while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready)
                        _pendingBundles.wait();

                    long diff = 0;
                    while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
                        var toCompress = _pendingBundles.poll();
                        diff -= toCompress.calculateTotalSize();
                        bundle.compress(toCompress);
                    }
                    diff += bundle.calculateTotalSize();
                    synchronized (_flushWaitSynchronizer) {
                        currentSize += diff;
                    }
                }

                var toWrite = new ArrayList<Pair<JObjectKey, JDataVersionedWrapper>>();
                var toDelete = new ArrayList<JObjectKey>();

                for (var e : bundle._entries.values()) {
                    switch (e) {
                        case TxBundle.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> {
                            Log.trace("Writing new " + key);
                            toWrite.add(Pair.of(key, data));
                        }
                        case TxBundle.DeletedEntry(JObjectKey key) -> {
                            Log.trace("Deleting from persistent storage " + key);
                            toDelete.add(key);
                        }
                        default -> throw new IllegalStateException("Unexpected value: " + e);
                    }
                }

                cachedStore.commitTx(
                        new TxManifestObj<>(
                                Collections.unmodifiableList(toWrite),
                                Collections.unmodifiableList(toDelete)
                        ), bundle.getId());

                Log.trace("Bundle " + bundle.getId() + " committed");

                // Remove from pending writes, after the real commit.
                // As we are the only writers to _pendingWrites, there is no need to synchronize with iterator creation:
                // if they get the older version, it will still contain all the new changes.
                synchronized (_pendingBundles) {
                    var curPw = _pendingWrites.get();
                    for (var e : bundle._entries.values()) {
                        var cur = curPw.get(e.key());
                        if (cur.bundleId() <= bundle.getId())
                            curPw = curPw.minus(e.key());
                    }
                    _pendingWrites.set(curPw);
                    // No need to increment version
                }

                List<List<Runnable>> callbacks = new ArrayList<>();
                synchronized (_notFlushedBundles) {
                    _lastWrittenTx.set(bundle.getId());
                    while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) {
                        callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted());
                    }
                }
                callbacks.forEach(l -> l.forEach(Runnable::run));

                synchronized (_flushWaitSynchronizer) {
                    currentSize -= bundle.calculateTotalSize();
                    // FIXME:
                    if (currentSize <= sizeLimit || !_ready)
                        _flushWaitSynchronizer.notifyAll();
                }
            } catch (InterruptedException ignored) {
            } catch (Exception e) {
                Log.error("Uncaught exception in writeback", e);
            } catch (Throwable o) {
                Log.error("Uncaught THROWABLE in writeback", o);
            }
        }
        Log.info("Writeback thread exiting");
    }

    public TxBundle createBundle() {
        verifyReady();
        boolean wait = false;
        while (true) {
            if (wait) {
                synchronized (_flushWaitSynchronizer) {
                    long started = System.currentTimeMillis();
                    while (currentSize > sizeLimit) {
                        try {
                            _flushWaitSynchronizer.wait();
                        } catch (InterruptedException e) {
                            throw new RuntimeException(e);
                        }
                    }
                    long waited = System.currentTimeMillis() - started;
                    _waitedTotal.addAndGet(waited);
                    if (Log.isTraceEnabled())
                        Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms");
                    wait = false;
                }
            }
            synchronized (_pendingBundles) {
                synchronized (_flushWaitSynchronizer) {
                    if (currentSize > sizeLimit) {
                        if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
                            var target = _pendingBundles.poll();

                            long diff = -target.calculateTotalSize();
                            while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
                                var toCompress = _pendingBundles.poll();
                                diff -= toCompress.calculateTotalSize();
                                target.compress(toCompress);
                            }
                            diff += target.calculateTotalSize();
                            currentSize += diff;
                            _pendingBundles.addFirst(target);
||||
}
|
||||
}
|
||||
|
||||
if (currentSize > sizeLimit) {
|
||||
wait = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
synchronized (_notFlushedBundles) {
|
||||
var bundle = new TxBundle(_counter.incrementAndGet());
|
||||
_pendingBundles.addLast(bundle);
|
||||
_notFlushedBundles.put(bundle.getId(), bundle);
|
||||
return bundle;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void commitBundle(TxBundle bundle) {
|
||||
verifyReady();
|
||||
_pendingWritesVersionLock.writeLock().lock();
|
||||
try {
|
||||
synchronized (_pendingBundles) {
|
||||
var curPw = _pendingWrites.get();
|
||||
for (var e : ((TxBundle) bundle)._entries.values()) {
|
||||
switch (e) {
|
||||
case TxBundle.CommittedEntry c -> {
|
||||
curPw = curPw.plus(c.key(), new PendingWrite(c.data, bundle.getId()));
|
||||
}
|
||||
case TxBundle.DeletedEntry d -> {
|
||||
curPw = curPw.plus(d.key(), new PendingDelete(d.key, bundle.getId()));
|
||||
}
|
||||
default -> throw new IllegalStateException("Unexpected value: " + e);
|
||||
}
|
||||
}
|
||||
// Now, make the changes visible to new iterators
|
||||
_pendingWrites.set(curPw);
|
||||
((TxBundle) bundle).setReady();
|
||||
if (_pendingBundles.peek() == bundle)
|
||||
_pendingBundles.notify();
|
||||
synchronized (_flushWaitSynchronizer) {
|
||||
currentSize += ((TxBundle) bundle).calculateTotalSize();
|
||||
}
|
||||
}
|
||||
assert bundle.getId() > _lastCommittedTx.get();
|
||||
_lastCommittedTx.set(bundle.getId());
|
||||
} finally {
|
||||
_pendingWritesVersionLock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
public void dropBundle(TxBundle bundle) {
|
||||
verifyReady();
|
||||
synchronized (_pendingBundles) {
|
||||
Log.warn("Dropped bundle: " + bundle);
|
||||
_pendingBundles.remove((TxBundle) bundle);
|
||||
synchronized (_flushWaitSynchronizer) {
|
||||
currentSize -= ((TxBundle) bundle).calculateTotalSize();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void fence(long bundleId) {
|
||||
var latch = new CountDownLatch(1);
|
||||
asyncFence(bundleId, latch::countDown);
|
||||
try {
|
||||
latch.await();
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public void asyncFence(long bundleId, Runnable fn) {
|
||||
verifyReady();
|
||||
if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!");
|
||||
if (_lastWrittenTx.get() >= bundleId) {
|
||||
fn.run();
|
||||
return;
|
||||
}
|
||||
synchronized (_notFlushedBundles) {
|
||||
if (_lastWrittenTx.get() >= bundleId) {
|
||||
fn.run();
|
||||
return;
|
||||
}
|
||||
_notFlushedBundles.get(bundleId).addCallback(fn);
|
||||
}
|
||||
}
|
||||
|
||||
private static class TxBundle {
|
||||
private final LinkedHashMap<JObjectKey, BundleEntry> _entries = new LinkedHashMap<>();
|
||||
private final ArrayList<Runnable> _callbacks = new ArrayList<>();
|
||||
private long _txId;
|
||||
private volatile boolean _ready = false;
|
||||
private long _size = -1;
|
||||
private boolean _wasCommitted = false;
|
||||
|
||||
private TxBundle(long txId) {
|
||||
_txId = txId;
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
return _txId;
|
||||
}
|
||||
|
||||
public void setReady() {
|
||||
_ready = true;
|
||||
}
|
||||
|
||||
public void addCallback(Runnable callback) {
|
||||
synchronized (_callbacks) {
|
||||
if (_wasCommitted) throw new IllegalStateException();
|
||||
_callbacks.add(callback);
|
||||
}
|
||||
}
|
||||
|
||||
public List<Runnable> setCommitted() {
|
||||
synchronized (_callbacks) {
|
||||
_wasCommitted = true;
|
||||
return Collections.unmodifiableList(_callbacks);
|
||||
}
|
||||
}
|
||||
|
||||
public void commit(JDataVersionedWrapper obj) {
|
||||
synchronized (_entries) {
|
||||
_entries.put(obj.data().key(), new CommittedEntry(obj.data().key(), obj, obj.data().estimateSize()));
|
||||
}
|
||||
}
|
||||
|
||||
public void delete(JObjectKey obj) {
|
||||
synchronized (_entries) {
|
||||
_entries.put(obj, new DeletedEntry(obj));
|
||||
}
|
||||
}
|
||||
|
||||
public long calculateTotalSize() {
|
||||
if (_size >= 0) return _size;
|
||||
_size = _entries.values().stream().mapToInt(BundleEntry::size).sum();
|
||||
return _size;
|
||||
}
|
||||
|
||||
public void compress(TxBundle other) {
|
||||
if (_txId >= other._txId)
|
||||
throw new IllegalArgumentException("Compressing an older bundle into newer");
|
||||
|
||||
_txId = other._txId;
|
||||
_size = -1;
|
||||
|
||||
_entries.putAll(other._entries);
|
||||
}
|
||||
|
||||
private interface BundleEntry {
|
||||
JObjectKey key();
|
||||
|
||||
int size();
|
||||
}
|
||||
|
||||
private record CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size)
|
||||
implements BundleEntry {
|
||||
}
|
||||
|
||||
private record DeletedEntry(JObjectKey key)
|
||||
implements BundleEntry {
|
||||
|
||||
public int size() {
|
||||
return 64;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public Optional<PendingWriteEntry> getPendingWrite(JObjectKey key) {
|
||||
synchronized (_pendingBundles) {
|
||||
return Optional.ofNullable(_pendingWrites.get().get(key));
|
||||
}
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
|
||||
var pending = getPendingWrite(name).orElse(null);
|
||||
return switch (pending) {
|
||||
case PendingWrite write -> Optional.of(write.data());
|
||||
case PendingDelete ignored -> Optional.empty();
|
||||
case null -> cachedStore.readObject(name);
|
||||
default -> throw new IllegalStateException("Unexpected value: " + pending);
|
||||
};
|
||||
}
|
||||
|
||||
public interface VerboseReadResult {
|
||||
}
|
||||
|
||||
public record VerboseReadResultPersisted(Optional<JDataVersionedWrapper> data) implements VerboseReadResult {
|
||||
}
|
||||
|
||||
public record VerboseReadResultPending(PendingWriteEntry pending) implements VerboseReadResult {
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
public VerboseReadResult readObjectVerbose(JObjectKey key) {
|
||||
var pending = getPendingWrite(key).orElse(null);
|
||||
if (pending != null) {
|
||||
return new VerboseReadResultPending(pending);
|
||||
}
|
||||
return new VerboseReadResultPersisted(cachedStore.readObject(key));
|
||||
}
|
||||
|
||||
/**
|
||||
* @param commitLocked - a function that will be called with a Consumer of a new transaction id,
|
||||
* that will commit the transaction the changes in the store will be visible to new transactions
|
||||
* only after the runnable is called
|
||||
*/
|
||||
public Consumer<Runnable> commitTx(Collection<TxRecord.TxObjectRecord<?>> writes, BiConsumer<Long, Runnable> commitLocked) {
|
||||
var bundle = createBundle();
|
||||
long bundleId = bundle.getId();
|
||||
try {
|
||||
for (var action : writes) {
|
||||
switch (action) {
|
||||
case TxRecord.TxObjectRecordWrite<?> write -> {
|
||||
Log.trace("Flushing object " + write.key());
|
||||
bundle.commit(new JDataVersionedWrapper(write.data(), bundleId));
|
||||
}
|
||||
case TxRecord.TxObjectRecordDeleted deleted -> {
|
||||
Log.trace("Deleting object " + deleted.key());
|
||||
bundle.delete(deleted.key());
|
||||
}
|
||||
default -> {
|
||||
throw new TxCommitException("Unexpected value: " + action.key());
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
dropBundle(bundle);
|
||||
throw new TxCommitException(t.getMessage(), t);
|
||||
}
|
||||
|
||||
|
||||
Log.tracef("Committing transaction %d to storage", bundleId);
|
||||
commitLocked.accept(bundleId, () -> {
|
||||
commitBundle(bundle);
|
||||
});
|
||||
|
||||
return r -> asyncFence(bundleId, r);
|
||||
}
|
||||
|
||||
// Returns an iterator with a view of all commited objects
|
||||
// Does not have to guarantee consistent view, snapshots are handled by upper layers
|
||||
// Invalidated by commitBundle, but might return data after it has been really committed
|
||||
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
|
||||
Log.tracev("Getting writeback iterator: {0}, {1}", start, key);
|
||||
_pendingWritesVersionLock.readLock().lock();
|
||||
try {
|
||||
var curPending = _pendingWrites.get();
|
||||
return new TombstoneMergingKvIterator<>("writeback-ps", start, key,
|
||||
(tS, tK) -> new MappingKvIterator<>(
|
||||
new NavigableMapKvIterator<>(curPending, tS, tK),
|
||||
e -> switch (e) {
|
||||
case PendingWrite pw -> new Data<>(pw.data());
|
||||
case PendingDelete d -> new Tombstone<>();
|
||||
default -> throw new IllegalStateException("Unexpected value: " + e);
|
||||
}),
|
||||
(tS, tK) -> cachedStore.getIterator(tS, tK));
|
||||
} finally {
|
||||
_pendingWritesVersionLock.readLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
public long getLastTxId() {
|
||||
_pendingWritesVersionLock.readLock().lock();
|
||||
try {
|
||||
return _lastCommittedTx.get();
|
||||
} finally {
|
||||
_pendingWritesVersionLock.readLock().unlock();
|
||||
}
|
||||
}
|
||||
}
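A minimal usage sketch of the two-phase commit contract above (illustrative only, not part of the diff; `writebackStore` is an injected WritebackObjectPersistentStore and `collectWrites()` is an assumed helper):

// Hypothetical caller: commit a batch of writes, then get notified when they hit disk.
Collection<TxRecord.TxObjectRecord<?>> writes = collectWrites(); // assumed helper
Consumer<Runnable> afterFlush = writebackStore.commitTx(writes, (txId, commit) -> {
    // Changes become visible to new transactions only once commit.run() is called,
    // so publish any caller-side state for txId first, then run it.
    commit.run();
});
// Register a callback that runs once the bundle is actually persisted.
afterFlush.accept(() -> Log.info("Transaction flushed to storage"));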
@@ -0,0 +1,231 @@
package com.usatiuk.dhfs.objects.persistence;

import com.usatiuk.dhfs.objects.*;
import com.usatiuk.dhfs.utils.DataLocker;
import io.quarkus.logging.Log;
import io.quarkus.runtime.Startup;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.pcollections.TreePMap;

import javax.annotation.Nonnull;
import java.util.LinkedHashMap;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.ReentrantReadWriteLock;

@ApplicationScoped
public class CachingObjectPersistentStore {
    private final LinkedHashMap<JObjectKey, CacheEntry> _cache = new LinkedHashMap<>(8, 0.75f, true);
    private TreePMap<JObjectKey, CacheEntry> _sortedCache = TreePMap.empty();
    private long _cacheVersion = 0;

    private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();
    private final DataLocker _locker = new DataLocker();

    @Inject
    SerializingObjectPersistentStore delegate;
    @ConfigProperty(name = "dhfs.objects.lru.limit")
    long sizeLimit;
    @ConfigProperty(name = "dhfs.objects.lru.print-stats")
    boolean printStats;

    private long _curSize = 0;
    private long _evict = 0;

    private ExecutorService _statusExecutor = null;

    @Startup
    void init() {
        if (printStats) {
            _statusExecutor = Executors.newSingleThreadExecutor();
            _statusExecutor.submit(() -> {
                try {
                    while (true) {
                        Thread.sleep(10000);
                        if (_curSize > 0)
                            Log.info("Cache status: size=" + _curSize / 1024 / 1024 + "MB" + " evicted=" + _evict);
                        _evict = 0;
                    }
                } catch (InterruptedException ignored) {
                }
            });
        }
    }

    private void put(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
        // Log.tracev("Adding {0} to cache: {1}", key, obj);
        _lock.writeLock().lock();
        try {
            int size = obj.map(o -> o.data().estimateSize()).orElse(16);

            _curSize += size;
            var entry = new CacheEntry(obj.<MaybeTombstone<JDataVersionedWrapper>>map(Data::new).orElse(new Tombstone<>()), size);
            var old = _cache.putLast(key, entry);

            _sortedCache = _sortedCache.plus(key, entry);
            if (old != null)
                _curSize -= old.size();

            while (_curSize >= sizeLimit) {
                var del = _cache.pollFirstEntry();
                _sortedCache = _sortedCache.minus(del.getKey());
                _curSize -= del.getValue().size();
                _evict++;
            }
        } finally {
            _lock.writeLock().unlock();
        }
    }

    @Nonnull
    public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
        try (var lock = _locker.lock(name)) {
            _lock.readLock().lock();
            try {
                var got = _cache.get(name);
                if (got != null) {
                    return got.object().opt();
                }
            } finally {
                _lock.readLock().unlock();
            }

            // TODO: This is possibly racy
            // var got = delegate.readObject(name);
            // put(name, got);
            return delegate.readObject(name);
        }
    }

    public void commitTx(TxManifestObj<? extends JDataVersionedWrapper> names, long txId) {
        var serialized = delegate.prepareManifest(names);
        Log.tracev("Committing: {0} writes, {1} deletes", names.written().size(), names.deleted().size());
        delegate.commitTx(serialized, txId, (commit) -> {
            _lock.writeLock().lock();
            try {
                // Make the changes visible atomically both in the cache and in the underlying store
                for (var write : names.written()) {
                    put(write.getLeft(), Optional.of(write.getRight()));
                }
                for (var del : names.deleted()) {
                    put(del, Optional.empty());
                }
                ++_cacheVersion;
                commit.run();
            } finally {
                _lock.writeLock().unlock();
            }
        });
        Log.tracev("Committed: {0} writes, {1} deletes", names.written().size(), names.deleted().size());
    }

    private class CachingKvIterator implements CloseableKvIterator<JObjectKey, JDataVersionedWrapper> {
        private final CloseableKvIterator<JObjectKey, JDataVersionedWrapper> _delegate;
        // This should be created under the lock
        private final long _curCacheVersion = _cacheVersion;

        private CachingKvIterator(CloseableKvIterator<JObjectKey, JDataVersionedWrapper> delegate) {
            _delegate = delegate;
        }

        @Override
        public JObjectKey peekNextKey() {
            return _delegate.peekNextKey();
        }

        @Override
        public void skip() {
            _delegate.skip();
        }

        @Override
        public void close() {
            _delegate.close();
        }

        @Override
        public boolean hasNext() {
            return _delegate.hasNext();
        }

        @Override
        public JObjectKey peekPrevKey() {
            return _delegate.peekPrevKey();
        }

        private void maybeCache(Pair<JObjectKey, JDataVersionedWrapper> prev) {
            _lock.writeLock().lock();
            try {
                if (_cacheVersion != _curCacheVersion) {
                    Log.tracev("Not caching: {0}", prev);
                } else {
                    Log.tracev("Caching: {0}", prev);
                    put(prev.getKey(), Optional.of(prev.getValue()));
                }
            } finally {
                _lock.writeLock().unlock();
            }
        }

        @Override
        public Pair<JObjectKey, JDataVersionedWrapper> prev() {
            var prev = _delegate.prev();
            maybeCache(prev);
            return prev;
        }

        @Override
        public boolean hasPrev() {
            return _delegate.hasPrev();
        }

        @Override
        public void skipPrev() {
            _delegate.skipPrev();
        }

        @Override
        public Pair<JObjectKey, JDataVersionedWrapper> next() {
            var next = _delegate.next();
            maybeCache(next);
            return next;
        }
    }

    // Returns an iterator with a view of all committed objects
    // Does not have to guarantee a consistent view; snapshots are handled by upper layers
    // Warning: it has a nasty side effect of global caching, so if some objects are still
    // in writeback, don't even call next on it
    public CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> getIterator(IteratorStart start, JObjectKey key) {
        _lock.readLock().lock();
        try {
            Log.tracev("Getting cache iterator: {0}, {1}", start, key);
            var curSortedCache = _sortedCache;
            return new MergingKvIterator<>("cache", start, key,
                    (mS, mK)
                            -> new MappingKvIterator<>(
                            new NavigableMapKvIterator<>(curSortedCache, mS, mK),
                            e -> {
                                Log.tracev("Taken from cache: {0}", e);
                                return e.object();
                            }
                    ),
                    (mS, mK)
                            -> new MappingKvIterator<>(new CachingKvIterator(delegate.getIterator(mS, mK)), Data::new));
        } finally {
            _lock.readLock().unlock();
        }
    }

    private record CacheEntry(MaybeTombstone<JDataVersionedWrapper> object, long size) {
    }

    public long getLastTxId() {
        return delegate.getLastCommitId();
    }
}
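A small standalone sketch of the LRU mechanism the cache above relies on (illustrative; the map name and values are made up): a LinkedHashMap constructed with accessOrder=true moves an entry to the tail on get, so pollFirstEntry always evicts the least recently used key, exactly like _cache and its eviction loop in put.

import java.util.LinkedHashMap;

// Requires Java 21+ (SequencedMap methods), same as the source's putLast/pollFirstEntry.
class LruDemo {
    public static void main(String[] args) {
        var lru = new LinkedHashMap<String, Integer>(8, 0.75f, true); // accessOrder = true
        lru.put("a", 1);
        lru.put("b", 2);
        lru.get("a"); // touching "a" moves it to the tail (most recently used)
        System.out.println(lru.pollFirstEntry().getKey()); // prints "b": the LRU entry goes first
    }
}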
@@ -0,0 +1,8 @@
package com.usatiuk.dhfs.objects.persistence;

public enum IteratorStart {
    LT, // position strictly before the key
    LE, // position at the key, or before it
    GT, // position strictly after the key
    GE, // position at the key, or after it
}
@@ -0,0 +1,337 @@
package com.usatiuk.dhfs.objects.persistence;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.objects.CloseableKvIterator;
import com.usatiuk.dhfs.objects.JObjectKey;
import com.usatiuk.dhfs.objects.KeyPredicateKvIterator;
import com.usatiuk.dhfs.objects.ReversibleKvIterator;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import io.quarkus.arc.properties.IfBuildProperty;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.apache.commons.lang3.mutable.MutableObject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.lmdbjava.*;

import javax.annotation.Nonnull;
import java.io.IOException;
import java.lang.ref.Cleaner;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.*;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;

import static org.lmdbjava.DbiFlags.MDB_CREATE;
import static org.lmdbjava.Env.create;

@ApplicationScoped
@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "lmdb")
public class LmdbObjectPersistentStore implements ObjectPersistentStore {
    private final Path _root;
    private Env<ByteBuffer> _env;
    private Dbi<ByteBuffer> _db;
    private boolean _ready = false;

    private long _lastTxId = 0;

    private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();

    private static final String DB_NAME = "objects";
    private static final byte[] DB_VER_OBJ_NAME = "__DB_VER_OBJ".getBytes(StandardCharsets.UTF_8);

    public LmdbObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) {
        _root = Path.of(root).resolve("objects");
    }

    void init(@Observes @Priority(100) StartupEvent event) throws IOException {
        if (!_root.toFile().exists()) {
            Log.info("Initializing with root " + _root);
            _root.toFile().mkdirs();
        }
        _env = create()
                .setMapSize(1_000_000_000_000L)
                .setMaxDbs(1)
                .open(_root.toFile(), EnvFlags.MDB_NOTLS);
        _db = _env.openDbi(DB_NAME, MDB_CREATE);

        var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
        bb.put(DB_VER_OBJ_NAME);
        bb.flip();

        try (Txn<ByteBuffer> txn = _env.txnRead()) {
            var value = _db.get(txn, bb);
            if (value != null) {
                var ver = value.getLong();
                Log.infov("Read version: {0}", ver);
                _lastTxId = ver;
            }
        }

        _ready = true;
    }

    void shutdown(@Observes @Priority(900) ShutdownEvent event) throws IOException {
        _ready = false;
        _db.close();
        _env.close();
    }

    private void verifyReady() {
        if (!_ready) throw new IllegalStateException("Wrong service order!");
    }

    @Nonnull
    @Override
    public Collection<JObjectKey> findAllObjects() {
        // try (Txn<ByteBuffer> txn = env.txnRead()) {
        //     try (var cursor = db.openCursor(txn)) {
        //         var keys = List.of();
        //         while (cursor.next()) {
        //             keys.add(JObjectKey.fromBytes(cursor.key()));
        //         }
        //         return keys;
        //     }
        // }
        return List.of();
    }

    @Nonnull
    @Override
    public Optional<ByteString> readObject(JObjectKey name) {
        verifyReady();
        try (Txn<ByteBuffer> txn = _env.txnRead()) {
            var value = _db.get(txn, name.toByteBuffer());
            return Optional.ofNullable(value).map(ByteString::copyFrom);
        }
    }

    private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, ByteString> {
        private final Txn<ByteBuffer> _txn = _env.txnRead();
        private final Cursor<ByteBuffer> _cursor = _db.openCursor(_txn);
        private boolean _hasNext = false;

        private static final Cleaner CLEANER = Cleaner.create();
        private final MutableObject<Boolean> _closed = new MutableObject<>(false);
        private final Exception _allocationStacktrace = new Exception();

        LmdbKvIterator(IteratorStart start, JObjectKey key) {
            _goingForward = true;
            var closedRef = _closed;
            var bt = _allocationStacktrace;
            CLEANER.register(this, () -> {
                if (!closedRef.getValue()) {
                    Log.error("Iterator was not closed before GC, allocated at: {0}", bt);
                    System.exit(-1);
                }
            });

            verifyReady();
            if (!_cursor.get(key.toByteBuffer(), GetOp.MDB_SET_RANGE)) {
                return;
            }

            var got = JObjectKey.fromByteBuffer(_cursor.key());
            _cursor.key().flip();
            var cmp = got.compareTo(key);

            assert cmp >= 0;

            _hasNext = true;

            if (cmp == 0) {
                switch (start) {
                    case LT -> {
                        _hasNext = _cursor.prev();
                        if (!_hasNext) {
                            _hasNext = _cursor.first();
                        }
                    }
                    case GT -> {
                        _hasNext = _cursor.next();
                    }
                    case LE, GE -> {
                    }
                }
            } else {
                switch (start) {
                    case LT, LE -> {
                        _hasNext = _cursor.prev();
                        if (!_hasNext) {
                            _hasNext = _cursor.first();
                        }
                    }
                    case GT, GE -> {
                    }
                }
            }

            var realGot = JObjectKey.fromByteBuffer(_cursor.key());
            _cursor.key().flip();

            switch (start) {
                case LT -> {
                    // assert !_hasNext || realGot.compareTo(key) < 0;
                }
                case LE -> {
                    // assert !_hasNext || realGot.compareTo(key) <= 0;
                }
                case GT -> {
                    assert !_hasNext || realGot.compareTo(key) > 0;
                }
                case GE -> {
                    assert !_hasNext || realGot.compareTo(key) >= 0;
                }
            }
            Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
        }

        @Override
        public void close() {
            if (_closed.getValue()) {
                return;
            }
            _closed.setValue(true);
            _cursor.close();
            _txn.close();
        }

        @Override
        protected void reverse() {
            if (_hasNext) {
                if (_goingForward) {
                    _hasNext = _cursor.prev();
                } else {
                    _hasNext = _cursor.next();
                }
            } else {
                if (_goingForward) {
                    _hasNext = _cursor.last();
                } else {
                    _hasNext = _cursor.first();
                }
            }
            _goingForward = !_goingForward;
        }

        @Override
        protected JObjectKey peekImpl() {
            if (!_hasNext) {
                throw new NoSuchElementException("No more elements");
            }
            var ret = JObjectKey.fromByteBuffer(_cursor.key());
            _cursor.key().flip();
            return ret;
        }

        @Override
        protected void skipImpl() {
            if (_goingForward)
                _hasNext = _cursor.next();
            else
                _hasNext = _cursor.prev();
        }

        @Override
        protected boolean hasImpl() {
            return _hasNext;
        }

        @Override
        protected Pair<JObjectKey, ByteString> nextImpl() {
            if (!_hasNext) {
                throw new NoSuchElementException("No more elements");
            }
            var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), ByteString.copyFrom(_cursor.val()));
            if (_goingForward)
                _hasNext = _cursor.next();
            else
                _hasNext = _cursor.prev();
            Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
            return ret;
        }
    }

    @Override
    public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
        return new KeyPredicateKvIterator<>(new LmdbKvIterator(start, key), start, key, (k) -> !Arrays.equals(k.name().getBytes(StandardCharsets.UTF_8), DB_VER_OBJ_NAME));
    }

    @Override
    public void commitTx(TxManifestRaw names, long txId, Consumer<Runnable> commitLocked) {
        verifyReady();
        try (Txn<ByteBuffer> txn = _env.txnWrite()) {
            for (var written : names.written()) {
                // TODO:
                var bb = UninitializedByteBuffer.allocateUninitialized(written.getValue().size());
                bb.put(written.getValue().asReadOnlyByteBuffer());
                bb.flip();
                _db.put(txn, written.getKey().toByteBuffer(), bb);
            }
            for (JObjectKey key : names.deleted()) {
                _db.delete(txn, key.toByteBuffer());
            }

            var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
            bb.put(DB_VER_OBJ_NAME);
            bb.flip();
            var bbData = ByteBuffer.allocateDirect(8);

            commitLocked.accept(() -> {
                _lock.writeLock().lock();
                try {
                    var realTxId = txId;
                    if (realTxId == -1)
                        realTxId = _lastTxId + 1;

                    assert realTxId > _lastTxId;
                    _lastTxId = realTxId;

                    bbData.putLong(realTxId);
                    bbData.flip();
                    _db.put(txn, bb, bbData);

                    txn.commit();
                } finally {
                    _lock.writeLock().unlock();
                }
            });
        }
    }

    @Override
    public long getTotalSpace() {
        verifyReady();
        return _root.toFile().getTotalSpace();
    }

    @Override
    public long getFreeSpace() {
        verifyReady();
        return _root.toFile().getFreeSpace();
    }

    @Override
    public long getUsableSpace() {
        verifyReady();
        return _root.toFile().getUsableSpace();
    }

    @Override
    public long getLastCommitId() {
        _lock.readLock().lock();
        try {
            return _lastTxId;
        } finally {
            _lock.readLock().unlock();
        }
    }

}
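For orientation, a minimal lmdbjava round-trip using only the calls the store above already makes (a sketch, not part of the diff; the path is arbitrary and the target directory is assumed to exist):

import org.lmdbjava.*;
import java.io.File;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import static org.lmdbjava.DbiFlags.MDB_CREATE;

class LmdbDemo {
    public static void main(String[] args) {
        Env<ByteBuffer> env = Env.create()
                .setMapSize(1_000_000_000L)
                .setMaxDbs(1)
                .open(new File("/tmp/lmdb-demo"), EnvFlags.MDB_NOTLS); // directory must exist
        Dbi<ByteBuffer> db = env.openDbi("demo", MDB_CREATE);

        ByteBuffer key = ByteBuffer.allocateDirect(8);
        key.put("hello".getBytes(StandardCharsets.UTF_8)).flip();
        ByteBuffer val = ByteBuffer.allocateDirect(8);
        val.put("world".getBytes(StandardCharsets.UTF_8)).flip();

        try (Txn<ByteBuffer> txn = env.txnWrite()) {
            db.put(txn, key, val);
            txn.commit(); // as in commitTx above, nothing is visible until commit
        }
        try (Txn<ByteBuffer> txn = env.txnRead()) {
            System.out.println(StandardCharsets.UTF_8.decode(db.get(txn, key))); // "world"
        }
        env.close();
    }
}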
@@ -0,0 +1,90 @@
package com.usatiuk.dhfs.objects.persistence;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.objects.CloseableKvIterator;
import com.usatiuk.dhfs.objects.JObjectKey;
import com.usatiuk.dhfs.objects.NavigableMapKvIterator;
import io.quarkus.arc.properties.IfBuildProperty;
import jakarta.enterprise.context.ApplicationScoped;

import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Optional;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;

@ApplicationScoped
@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory")
public class MemoryObjectPersistentStore implements ObjectPersistentStore {
    private final ConcurrentSkipListMap<JObjectKey, ByteString> _objects = new ConcurrentSkipListMap<>();
    private long _lastCommitId = 0;
    private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();

    @Nonnull
    @Override
    public Collection<JObjectKey> findAllObjects() {
        synchronized (this) {
            return _objects.keySet();
        }
    }

    @Nonnull
    @Override
    public Optional<ByteString> readObject(JObjectKey name) {
        synchronized (this) {
            return Optional.ofNullable(_objects.get(name));
        }
    }

    @Override
    public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
        return new NavigableMapKvIterator<>(_objects, start, key);
    }

    @Override
    public void commitTx(TxManifestRaw names, long txId, Consumer<Runnable> commitLocked) {
        synchronized (this) {
            for (var written : names.written()) {
                _objects.put(written.getKey(), written.getValue());
            }
            for (JObjectKey key : names.deleted()) {
                _objects.remove(key);
            }
            commitLocked.accept(() -> {
                _lock.writeLock().lock();
                try {
                    assert txId > _lastCommitId;
                    _lastCommitId = txId;
                } finally {
                    _lock.writeLock().unlock();
                }
            });
        }
    }

    @Override
    public long getTotalSpace() {
        return 0;
    }

    @Override
    public long getFreeSpace() {
        return 0;
    }

    @Override
    public long getUsableSpace() {
        return 0;
    }

    @Override
    public long getLastCommitId() {
        _lock.readLock().lock();
        try {
            return _lastCommitId;
        } finally {
            _lock.readLock().unlock();
        }
    }
}
@@ -0,0 +1,38 @@
package com.usatiuk.dhfs.objects.persistence;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.objects.CloseableKvIterator;
import com.usatiuk.dhfs.objects.JObjectKey;

import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Optional;
import java.util.function.Consumer;

// Persistent storage of objects
// All changes are written as sequential transactions
public interface ObjectPersistentStore {
    @Nonnull
    Collection<JObjectKey> findAllObjects();

    @Nonnull
    Optional<ByteString> readObject(JObjectKey name);

    // Returns an iterator with a view of all committed objects
    // Does not have to guarantee a consistent view; snapshots are handled by upper layers
    CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key);

    /**
     * @param commitLocked - a function that will be called with a Runnable that will commit the transaction;
     *                     the changes in the store will be visible to new transactions only after the runnable is called
     */
    void commitTx(TxManifestRaw names, long txId, Consumer<Runnable> commitLocked);

    long getTotalSpace();

    long getFreeSpace();

    long getUsableSpace();

    long getLastCommitId();
}
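A brief sketch of how a caller is expected to drive the commitLocked contract documented above (illustrative; `store` and `manifest` are assumed to exist):

// The implementation prepares the transaction, then hands back a Runnable that
// makes it durable and visible; the caller publishes its own state first.
store.commitTx(manifest, txId, (commit) -> {
    // ...update caller-side caches/indexes under the caller's lock here...
    commit.run(); // only now do new transactions see the changes
});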
@@ -0,0 +1,56 @@
package com.usatiuk.dhfs.objects.persistence;

import com.usatiuk.dhfs.objects.*;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;

import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Optional;
import java.util.function.Consumer;

@ApplicationScoped
public class SerializingObjectPersistentStore {
    @Inject
    ObjectSerializer<JDataVersionedWrapper> serializer;

    @Inject
    ObjectPersistentStore delegateStore;

    @Nonnull
    Collection<JObjectKey> findAllObjects() {
        return delegateStore.findAllObjects();
    }

    @Nonnull
    Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
        return delegateStore.readObject(name).map(serializer::deserialize);
    }

    // Returns an iterator with a view of all committed objects
    // Does not have to guarantee a consistent view; snapshots are handled by upper layers
    public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
        return new MappingKvIterator<>(delegateStore.getIterator(start, key), d -> serializer.deserialize(d));
    }

    public TxManifestRaw prepareManifest(TxManifestObj<? extends JDataVersionedWrapper> names) {
        return new TxManifestRaw(
                names.written().stream()
                        .map(e -> Pair.of(e.getKey(), serializer.serialize(e.getValue())))
                        .toList()
                , names.deleted());
    }

    // void commitTx(TxManifestObj<? extends JDataVersionedWrapper> names, Consumer<Runnable> commitLocked) {
    //     delegateStore.commitTx(prepareManifest(names), commitLocked);
    // }

    void commitTx(TxManifestRaw names, long txId, Consumer<Runnable> commitLocked) {
        delegateStore.commitTx(names, txId, commitLocked);
    }

    long getLastCommitId() {
        return delegateStore.getLastCommitId();
    }
}
@@ -0,0 +1,12 @@
package com.usatiuk.dhfs.objects.persistence;

import com.usatiuk.dhfs.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;

import java.io.Serializable;
import java.util.Collection;

// FIXME: Serializable
public record TxManifestObj<T>(Collection<Pair<JObjectKey, T>> written,
                               Collection<JObjectKey> deleted) implements Serializable {
}
@@ -0,0 +1,13 @@
package com.usatiuk.dhfs.objects.persistence;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;

import java.io.Serializable;
import java.util.Collection;

// FIXME: Serializable
public record TxManifestRaw(Collection<Pair<JObjectKey, ByteString>> written,
                            Collection<JObjectKey> deleted) implements Serializable {
}
@@ -0,0 +1,7 @@
package com.usatiuk.dhfs.objects.snapshot;

public interface SnapshotEntry {
    long whenToRemove();

    SnapshotEntry withWhenToRemove(long whenToRemove);
}
@@ -0,0 +1,8 @@
package com.usatiuk.dhfs.objects.snapshot;

public record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry {
    @Override
    public SnapshotEntryDeleted withWhenToRemove(long whenToRemove) {
        return new SnapshotEntryDeleted(whenToRemove);
    }
}
@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects.snapshot;

import com.usatiuk.dhfs.objects.JDataVersionedWrapper;

public record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry {
    @Override
    public SnapshotEntryObject withWhenToRemove(long whenToRemove) {
        return new SnapshotEntryObject(data, whenToRemove);
    }
}
@@ -0,0 +1,15 @@
package com.usatiuk.dhfs.objects.snapshot;

import com.usatiuk.dhfs.objects.JObjectKey;

import javax.annotation.Nonnull;
import java.util.Comparator;

public record SnapshotKey(JObjectKey key, long version) implements Comparable<SnapshotKey> {
    @Override
    public int compareTo(@Nonnull SnapshotKey o) {
        return Comparator.comparing(SnapshotKey::key)
                .thenComparing(SnapshotKey::version)
                .compare(this, o);
    }
}
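A quick illustration of the ordering this comparator induces, using toy stand-in types rather than the real DHFS classes: all versions of one object sort together, oldest first, so a range scan can walk an object's history contiguously.

import java.util.Comparator;
import java.util.TreeSet;

// Toy stand-ins (illustrative only) mirroring SnapshotKey's key-then-version order.
record Key(String name) implements Comparable<Key> {
    public int compareTo(Key o) { return name.compareTo(o.name); }
}

record SKey(Key key, long version) implements Comparable<SKey> {
    public int compareTo(SKey o) {
        return Comparator.comparing(SKey::key).thenComparing(SKey::version).compare(this, o);
    }
}

class SnapshotKeyOrderDemo {
    public static void main(String[] args) {
        var set = new TreeSet<SKey>();
        set.add(new SKey(new Key("b"), 1));
        set.add(new SKey(new Key("a"), 7));
        set.add(new SKey(new Key("a"), 3));
        // Prints: (a,3) (a,7) (b,1) -- versions of one object stay adjacent, oldest first
        set.forEach(k -> System.out.print("(" + k.key().name() + "," + k.version() + ") "));
    }
}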
@@ -0,0 +1,185 @@
package com.usatiuk.dhfs.objects.snapshot;

import com.usatiuk.dhfs.objects.*;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;

import java.util.NavigableMap;
import java.util.NoSuchElementException;
import java.util.Optional;

// TODO: test me
public class SnapshotKvIterator extends ReversibleKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> {
    private final NavigableMap<SnapshotKey, SnapshotEntry> _objects;
    private final long _version;
    private final CloseableKvIterator<SnapshotKey, SnapshotEntry> _backing;
    private Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> _next = null;

    public SnapshotKvIterator(NavigableMap<SnapshotKey, SnapshotEntry> objects, long version, IteratorStart start, JObjectKey startKey) {
        _objects = objects;
        _version = version;
        _goingForward = true;
        _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, Long.MIN_VALUE));
        fill();

        boolean shouldGoBack = false;
        if (start == IteratorStart.LE) {
            if (_next == null || _next.getKey().compareTo(startKey) > 0) {
                shouldGoBack = true;
            }
        } else if (start == IteratorStart.LT) {
            if (_next == null || _next.getKey().compareTo(startKey) >= 0) {
                shouldGoBack = true;
            }
        }

        if (shouldGoBack && _backing.hasPrev()) {
            _goingForward = false;
            _backing.skipPrev();
            fill();
            _goingForward = true;
            _backing.skip();
            fill();
        }

        switch (start) {
            case LT -> {
                // assert _next == null || _next.getKey().compareTo(startKey) < 0;
            }
            case LE -> {
                // assert _next == null || _next.getKey().compareTo(startKey) <= 0;
            }
            case GT -> {
                assert _next == null || _next.getKey().compareTo(startKey) > 0;
            }
            case GE -> {
                assert _next == null || _next.getKey().compareTo(startKey) >= 0;
            }
        }
    }

    private void fillPrev(JObjectKey ltKey) {
        if (ltKey != null)
            while (_backing.hasPrev() && _backing.peekPrevKey().key().equals(ltKey)) {
                Log.tracev("Snapshot skipping prev: {0}", _backing.peekPrevKey());
                _backing.skipPrev();
            }

        _next = null;

        while (_backing.hasPrev() && _next == null) {
            var prev = _backing.prev();
            if (prev.getKey().version() <= _version && prev.getValue().whenToRemove() > _version) {
                Log.tracev("Snapshot taking prev: {0}", prev);
                _next = switch (prev.getValue()) {
                    case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) ->
                            Pair.of(prev.getKey().key(), new Data<>(data));
                    case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(prev.getKey().key(), new Tombstone<>());
                    default -> throw new IllegalStateException("Unexpected value: " + prev.getValue());
                };
            }
        }

        if (_next != null) {
            if (_next.getValue() instanceof Data<JDataVersionedWrapper>(
                    JDataVersionedWrapper value
            )) {
                assert value.version() <= _version;
            }
        }
    }

    private void fillNext() {
        _next = null;
        while (_backing.hasNext() && _next == null) {
            var next = _backing.next();
            var nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null;
            while (nextNextKey != null && nextNextKey.key().equals(next.getKey().key()) && nextNextKey.version() <= _version) {
                Log.tracev("Snapshot skipping next: {0} (too old)", next);
                next = _backing.next();
                nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null;
            }
            // next.getValue().whenToRemove() >= _id: a read tx might have the same snapshot id as some write tx
            if (next.getKey().version() <= _version && next.getValue().whenToRemove() > _version) {
                _next = switch (next.getValue()) {
                    case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) ->
                            Pair.of(next.getKey().key(), new Data<>(data));
                    case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(next.getKey().key(), new Tombstone<>());
                    default -> throw new IllegalStateException("Unexpected value: " + next.getValue());
                };
            }
            if (_next != null) {
                if (_next.getValue() instanceof Data<JDataVersionedWrapper>(
                        JDataVersionedWrapper value
                )) {
                    assert value.version() <= _version;
                }
            }
        }
    }

    private void fill() {
        if (_goingForward)
            fillNext();
        else
            fillPrev(Optional.ofNullable(_next).map(Pair::getKey).orElse(null));
    }

    @Override
    protected void reverse() {
        _goingForward = !_goingForward;

        boolean wasAtEnd = _next == null;

        if (_goingForward && !wasAtEnd)
            _backing.skip();
        else if (!_goingForward && !wasAtEnd)
            _backing.skipPrev();

        fill();
    }

    @Override
    public JObjectKey peekImpl() {
        if (_next == null)
            throw new NoSuchElementException();
        return _next.getKey();
    }

    @Override
    public void skipImpl() {
        if (_next == null)
            throw new NoSuchElementException();
        fill();
    }

    @Override
    public void close() {
        _backing.close();
    }

    @Override
    public boolean hasImpl() {
        return _next != null;
    }

    @Override
    public Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> nextImpl() {
        if (_next == null)
            throw new NoSuchElementException("No more elements");
        var ret = _next;
        if (ret.getValue() instanceof Data<JDataVersionedWrapper>(
                JDataVersionedWrapper value
        )) {
            assert value.version() <= _version;
        }

        fill();
        Log.tracev("Read: {0}, next: {1}", ret, _next);
        return ret;
    }

}
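A toy model of the visibility predicate used by fillNext/fillPrev above (local stand-in types, not the real DHFS classes): an entry is visible to a snapshot at a given version iff it was written at or before that version and superseded strictly after it.

class SnapshotVisibilityDemo {
    record Entry(long writtenAt, long whenToRemove) {}

    static boolean visible(Entry e, long snapshotVersion) {
        // Written at or before the snapshot, superseded strictly after it
        return e.writtenAt() <= snapshotVersion && e.whenToRemove() > snapshotVersion;
    }

    public static void main(String[] args) {
        var v3 = new Entry(3, 7);              // version 3, replaced at tx 7
        var v7 = new Entry(7, Long.MAX_VALUE); // current version
        System.out.println(visible(v3, 5)); // true: snapshot 5 sees the version-3 copy
        System.out.println(visible(v3, 8)); // false: already superseded
        System.out.println(visible(v7, 8)); // true
    }
}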
@@ -0,0 +1,342 @@
package com.usatiuk.dhfs.objects.snapshot;

import com.usatiuk.dhfs.objects.*;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.transaction.TxRecord;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.mutable.MutableObject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.pcollections.TreePMap;

import javax.annotation.Nonnull;
import java.lang.ref.Cleaner;
import java.util.*;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;

@ApplicationScoped
public class SnapshotManager {
    @Inject
    WritebackObjectPersistentStore writebackStore;

    private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();

    @ConfigProperty(name = "dhfs.objects.persistence.snapshot-extra-checks")
    boolean extraChecks;

    private long _lastSnapshotId = 0;
    private long _lastAliveSnapshotId = -1;

    private final Queue<Long> _snapshotIds = new ArrayDeque<>();
    private TreePMap<SnapshotKey, SnapshotEntry> _objects = TreePMap.empty();
    private final TreeMap<Long, ArrayDeque<SnapshotKey>> _snapshotBounds = new TreeMap<>();
    private final HashMap<Long, Long> _snapshotRefCounts = new HashMap<>();

    private void verify() {
        assert _snapshotIds.isEmpty() == (_lastAliveSnapshotId == -1);
        assert _snapshotIds.isEmpty() || _snapshotIds.peek() == _lastAliveSnapshotId;
    }

    // This should not be called for the same objects concurrently
    public Consumer<Runnable> commitTx(Collection<TxRecord.TxObjectRecord<?>> writes) {
        // _lock.writeLock().lock();
        // try {
        //     if (!_snapshotIds.isEmpty()) {
        //         verify();
        HashMap<SnapshotKey, SnapshotEntry> newEntries = new HashMap<>();
        for (var action : writes) {
            var current = writebackStore.readObjectVerbose(action.key());
            // Add to the snapshot the previous visible version of the replaced object,
            // i.e. it should be visible to all transactions with id <= this commit's id,
            // starting at its corresponding version
            Pair<SnapshotKey, SnapshotEntry> newSnapshotEntry = switch (current) {
                case WritebackObjectPersistentStore.VerboseReadResultPersisted(
                        Optional<JDataVersionedWrapper> data
                ) -> Pair.of(new SnapshotKey(action.key(), data.map(JDataVersionedWrapper::version).orElse(-1L)),
                        data.<SnapshotEntry>map(o -> new SnapshotEntryObject(o, -1)).orElse(new SnapshotEntryDeleted(-1)));
                case WritebackObjectPersistentStore.VerboseReadResultPending(
                        PendingWriteEntry pending
                ) -> {
                    yield switch (pending) {
                        case PendingWrite write ->
                                Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data(), -1));
                        case PendingDelete delete ->
                                Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted(-1));
                        default -> throw new IllegalStateException("Unexpected value: " + pending);
                    };
                }
                default -> throw new IllegalStateException("Unexpected value: " + current);
            };

            Log.tracev("Adding snapshot entry {0}", newSnapshotEntry);

            newEntries.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight());
        }

        _lock.writeLock().lock();
        try {
            return writebackStore.commitTx(writes, (id, commit) -> {
                if (!_snapshotIds.isEmpty()) {
                    assert id > _lastSnapshotId;
                    for (var newSnapshotEntry : newEntries.entrySet()) {
                        assert newSnapshotEntry.getKey().version() < id;
                        var realNewSnapshotEntry = newSnapshotEntry.getValue().withWhenToRemove(id);
                        if (realNewSnapshotEntry instanceof SnapshotEntryObject re) {
                            assert re.data().version() <= newSnapshotEntry.getKey().version();
                        }
                        _objects = _objects.plus(newSnapshotEntry.getKey(), realNewSnapshotEntry);
                        // assert val == null;
                        _snapshotBounds.merge(newSnapshotEntry.getKey().version(), new ArrayDeque<>(List.of(newSnapshotEntry.getKey())),
                                (a, b) -> {
                                    a.addAll(b);
                                    return a;
                                });
                    }
                }
                commit.run();
            });
        } finally {
            _lock.writeLock().unlock();
        }

        // }

        // verify();
        // Commit under the lock; iterators will see the new version after the lock is released and the writeback
        // cache is updated
        // TODO: Maybe the writeback iterator being invalidated wouldn't be a problem?
        // } finally {
        //     _lock.writeLock().unlock();
        // }
    }

    private void unrefSnapshot(long id) {
        Log.tracev("Unref snapshot {0}", id);
        _lock.writeLock().lock();
        try {
            verify();
            var refCount = _snapshotRefCounts.merge(id, -1L, (a, b) -> a + b == 0 ? null : a + b);
            if (!(refCount == null && id == _lastAliveSnapshotId)) {
                return;
            }

            long curCount;
            long curId = id;
            long nextId;
            do {
                Log.tracev("Removing snapshot {0}", curId);
                _snapshotIds.poll();
                nextId = _snapshotIds.isEmpty() ? -1 : _snapshotIds.peek();
                while (nextId == curId) {
                    _snapshotIds.poll();
                    nextId = _snapshotIds.isEmpty() ? -1 : _snapshotIds.peek();
                }

                var keys = _snapshotBounds.headMap(curId, true);

                long finalCurId = curId;
                long finalNextId = nextId;
                ArrayList<Pair<Long, SnapshotKey>> toReAdd = new ArrayList<>();
                keys.values().stream().flatMap(Collection::stream).forEach(key -> {
                    var entry = _objects.get(key);
                    if (entry == null) {
                        // Log.warnv("Entry not found for key {0}", key);
                        return;
                    }
                    if (finalNextId == -1) {
                        Log.tracev("Could not find a place for entry {0}, curId={1}, nextId={2}, whenToRemove={3}, snapshotIds={4}",
                                entry, finalCurId, finalNextId, entry.whenToRemove(), _snapshotIds);
                    } else if (finalNextId < entry.whenToRemove()) {
                        _objects = _objects.plus(new SnapshotKey(key.key(), finalNextId), entry);
                        assert finalNextId > finalCurId;
                        toReAdd.add(Pair.of(finalNextId, new SnapshotKey(key.key(), finalNextId)));
                    }
                    _objects = _objects.minus(key);
                });

                toReAdd.forEach(p -> {
                    _snapshotBounds.merge(p.getLeft(), new ArrayDeque<>(List.of(p.getRight())),
                            (a, b) -> {
                                a.addAll(b);
                                return a;
                            });
                });

                keys.clear();

                if (_snapshotIds.isEmpty()) {
                    _lastAliveSnapshotId = -1;
                    break;
                }

                curId = _snapshotIds.peek();
                _lastAliveSnapshotId = curId;

                curCount = _snapshotRefCounts.getOrDefault(curId, 0L);
            } while (curCount == 0);
            verify();
        } finally {
            _lock.writeLock().unlock();
        }
    }

    public static class IllegalSnapshotIdException extends IllegalArgumentException {
        public IllegalSnapshotIdException(String message) {
            super(message);
        }

        @Override
        public synchronized Throwable fillInStackTrace() {
            return this;
        }
    }

    public class Snapshot implements AutoCloseableNoThrow {
        private final long _id;
        private static final Cleaner CLEANER = Cleaner.create();
        private final MutableObject<Boolean> _closed = new MutableObject<>(false);

        public long id() {
            return _id;
        }

        private Snapshot(long id) {
            _id = id;
            _lock.writeLock().lock();
            try {
                verify();
                if (_lastSnapshotId > id)
                    throw new IllegalSnapshotIdException("Snapshot id " + id + " is less than last snapshot id " + _lastSnapshotId);
                _lastSnapshotId = id;
                if (_lastAliveSnapshotId == -1)
                    _lastAliveSnapshotId = id;
                if (_snapshotRefCounts.merge(id, 1L, Long::sum) == 1) {
                    _snapshotIds.add(id);
                }
                verify();
            } finally {
                _lock.writeLock().unlock();
            }
            var closedRef = _closed;
            var idRef = _id;
            CLEANER.register(this, () -> {
                if (!closedRef.getValue()) {
                    Log.error("Snapshot " + idRef + " was not closed before GC");
                }
            });
        }

        public class CheckingSnapshotKvIterator implements CloseableKvIterator<JObjectKey, JDataVersionedWrapper> {
            private final CloseableKvIterator<JObjectKey, JDataVersionedWrapper> _backing;

            public CheckingSnapshotKvIterator(CloseableKvIterator<JObjectKey, JDataVersionedWrapper> backing) {
                _backing = backing;
            }

            @Override
            public JObjectKey peekNextKey() {
                return _backing.peekNextKey();
            }

            @Override
            public void skip() {
                _backing.skip();
            }

            @Override
            public JObjectKey peekPrevKey() {
                return _backing.peekPrevKey();
            }

            @Override
            public Pair<JObjectKey, JDataVersionedWrapper> prev() {
                var ret = _backing.prev();
                assert ret.getValue().version() <= _id;
                return ret;
            }

            @Override
            public boolean hasPrev() {
                return _backing.hasPrev();
            }

            @Override
            public void skipPrev() {
                _backing.skipPrev();
            }

            @Override
            public void close() {
                _backing.close();
            }

            @Override
            public boolean hasNext() {
                return _backing.hasNext();
            }

            @Override
            public Pair<JObjectKey, JDataVersionedWrapper> next() {
                var ret = _backing.next();
                assert ret.getValue().version() <= _id;
                return ret;
            }
        }

        public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
            _lock.readLock().lock();
            try {
                Log.tracev("Getting snapshot {0} iterator for {1} {2}\n" +
                        "objects in snapshots: {3}", _id, start, key, _objects);
                return new CheckingSnapshotKvIterator(new TombstoneMergingKvIterator<>("snapshot", start, key,
                        (tS, tK) -> new SnapshotKvIterator(_objects, _id, tS, tK),
                        (tS, tK) -> new MappingKvIterator<>(
                                writebackStore.getIterator(tS, tK), d -> d.version() <= _id ? new Data<>(d) : new Tombstone<>())
                ));
            } finally {
                _lock.readLock().unlock();
            }
        }

        @Nonnull
        public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
            try (var it = getIterator(IteratorStart.GE, name)) {
                if (it.hasNext()) {
                    if (!it.peekNextKey().equals(name)) {
                        return Optional.empty();
                    }
                    return Optional.of(it.next().getValue());
                }
            }
            return Optional.empty();
        }

        @Override
        public void close() {
            if (_closed.getValue()) {
                return;
            }
            _closed.setValue(true);
            unrefSnapshot(_id);
        }
    }

    public Snapshot createSnapshot() {
        _lock.writeLock().lock();
        try {
            return new Snapshot(writebackStore.getLastTxId());
        } finally {
            _lock.writeLock().unlock();
        }
    }

    @Nonnull
    public Optional<JDataVersionedWrapper> readObjectDirect(JObjectKey name) {
        return writebackStore.readObject(name);
    }
}
|
||||
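A minimal usage sketch for the snapshot lifecycle above, assuming an injected SnapshotManager (the key name is hypothetical; the explicit close() mirrors what TransactionImpl does below and avoids the Cleaner's "not closed before GC" error):

var snapshot = snapshotManager.createSnapshot();
try {
    // Reads see the state as of the snapshot's transaction id, never anything newer.
    var value = snapshot.readObject(JObjectKey.of("some-key"));
    value.ifPresent(v -> Log.info("read object at version " + v.version()));
} finally {
    snapshot.close(); // releases the snapshot's reference count
}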
@@ -0,0 +1,6 @@
package com.usatiuk.dhfs.objects.transaction;

public enum LockingStrategy {
    OPTIMISTIC, // Optimistic write: does not block other potential writers/readers
    WRITE, // Write lock: blocks all other writers
}
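For illustration, a sketch of how a caller is expected to pick between the two strategies (this assumes the Transaction interface introduced below; the key name is hypothetical):

// OPTIMISTIC: read without holding the object's lock; a conflicting concurrent
// writer surfaces as a commit-time failure that the caller may retry.
var fast = curTx.get(Parent.class, JObjectKey.of("some-parent"), LockingStrategy.OPTIMISTIC);
// WRITE: take the object's write lock up front, serializing with other writers.
var locked = curTx.get(Parent.class, JObjectKey.of("some-parent"), LockingStrategy.WRITE);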
@@ -0,0 +1,138 @@
package com.usatiuk.dhfs.objects.transaction;

import com.usatiuk.dhfs.objects.*;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.snapshot.SnapshotManager;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

@ApplicationScoped
public class ReadTrackingObjectSourceFactory {
    @Inject
    LockManager lockManager;

    public ReadTrackingTransactionObjectSource create(SnapshotManager.Snapshot snapshot) {
        return new ReadTrackingObjectSourceImpl(snapshot);
    }

    public class ReadTrackingObjectSourceImpl implements ReadTrackingTransactionObjectSource {
        private final SnapshotManager.Snapshot _snapshot;

        private final Map<JObjectKey, TransactionObject<?>> _readSet = new HashMap<>();

        public ReadTrackingObjectSourceImpl(SnapshotManager.Snapshot snapshot) {
            _snapshot = snapshot;
        }

        public Map<JObjectKey, TransactionObject<?>> getRead() {
            return Collections.unmodifiableMap(_readSet);
        }

        @Override
        public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key) {
            var got = _readSet.get(key);

            if (got == null) {
                var read = _snapshot.readObject(key);
                _readSet.put(key, new TransactionObjectNoLock<>(read));
                return read.map(JDataVersionedWrapper::data).map(type::cast);
            }

            return got.data().map(JDataVersionedWrapper::data).map(type::cast);
        }

        @Override
        public <T extends JData> Optional<T> getWriteLocked(Class<T> type, JObjectKey key) {
            var got = _readSet.get(key);

            if (got == null) {
                var lock = lockManager.lockObject(key);
                try {
                    var read = _snapshot.readObject(key);
                    _readSet.put(key, new TransactionObjectLocked<>(read, lock));
                    return read.map(JDataVersionedWrapper::data).map(type::cast);
                } catch (Exception e) {
                    lock.close();
                    throw e;
                }
            }

            return got.data().map(JDataVersionedWrapper::data).map(type::cast);
        }

        @Override
        public void close() {
//            for (var it : _iterators) {
//                it.close();
//            }
        }

        private class ReadTrackingIterator implements CloseableKvIterator<JObjectKey, JData> {
            private final CloseableKvIterator<JObjectKey, JDataVersionedWrapper> _backing;

            public ReadTrackingIterator(IteratorStart start, JObjectKey key) {
                _backing = _snapshot.getIterator(start, key);
            }

            @Override
            public JObjectKey peekNextKey() {
                return _backing.peekNextKey();
            }

            @Override
            public void skip() {
                _backing.skip();
            }

            @Override
            public JObjectKey peekPrevKey() {
                return _backing.peekPrevKey();
            }

            @Override
            public Pair<JObjectKey, JData> prev() {
                var got = _backing.prev();
                _readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(got.getValue())));
                return Pair.of(got.getKey(), got.getValue().data());
            }

            @Override
            public boolean hasPrev() {
                return _backing.hasPrev();
            }

            @Override
            public void skipPrev() {
                _backing.skipPrev();
            }

            @Override
            public void close() {
                _backing.close();
            }

            @Override
            public boolean hasNext() {
                return _backing.hasNext();
            }

            @Override
            public Pair<JObjectKey, JData> next() {
                var got = _backing.next();
                _readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(got.getValue())));
                return Pair.of(got.getKey(), got.getValue().data());
            }
        }

        @Override
        public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
            return new ReadTrackingIterator(start, key);
        }
    }
}
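In short, every read (point lookups and iterator traversals alike) is funneled through this source and recorded, so the commit machinery can later inspect via TransactionPrivate.reads() exactly what the transaction observed. A sketch (keys hypothetical, factory assumed injected):

var source = readTrackingObjectSourceFactory.create(snapshotManager.createSnapshot());
source.get(JData.class, JObjectKey.of("a"));            // recorded as TransactionObjectNoLock
source.getWriteLocked(JData.class, JObjectKey.of("b")); // recorded with its write lock held
var readSet = source.getRead();                         // the read set exposed to commit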
@@ -0,0 +1,26 @@
package com.usatiuk.dhfs.objects.transaction;

import com.usatiuk.dhfs.objects.CloseableKvIterator;
import com.usatiuk.dhfs.objects.JData;
import com.usatiuk.dhfs.objects.JObjectKey;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Iterator;
import java.util.Map;
import java.util.Optional;

public interface ReadTrackingTransactionObjectSource extends AutoCloseableNoThrow {
    <T extends JData> Optional<T> get(Class<T> type, JObjectKey key);

    <T extends JData> Optional<T> getWriteLocked(Class<T> type, JObjectKey key);

    CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key);

    default CloseableKvIterator<JObjectKey, JData> getIterator(JObjectKey key) {
        return getIterator(IteratorStart.GE, key);
    }

    Map<JObjectKey, TransactionObject<?>> getRead();
}
@@ -0,0 +1,35 @@
package com.usatiuk.dhfs.objects.transaction;

import com.usatiuk.dhfs.objects.CloseableKvIterator;
import com.usatiuk.dhfs.objects.JData;
import com.usatiuk.dhfs.objects.JObjectKey;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;

import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Optional;

// The transaction interface actually used by user code to retrieve objects
public interface Transaction extends TransactionHandle {
    void onCommit(Runnable runnable);

    <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy);

    void put(JData obj);

    void delete(JObjectKey key);

    @Nonnull
    Collection<JObjectKey> findAllObjects(); // FIXME: This is crap

    default <T extends JData> Optional<T> get(Class<T> type, JObjectKey key) {
        return get(type, key, LockingStrategy.OPTIMISTIC);
    }

    CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key);

    default CloseableKvIterator<JObjectKey, JData> getIterator(JObjectKey key) {
        return getIterator(IteratorStart.GE, key);
    }
}
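A minimal end-to-end sketch of this interface, using the TransactionManager txm and the injected Transaction curTx the same way the tests later in this change do (the key name is hypothetical):

txm.run(() -> {
    curTx.put(new Parent(JObjectKey.of("example"), "John"));    // buffered write
    var p = curTx.get(Parent.class, JObjectKey.of("example"));  // sees the buffered write
    curTx.onCommit(() -> Log.info("committed " + p.orElseThrow().name()));
});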
@@ -0,0 +1,5 @@
package com.usatiuk.dhfs.objects.transaction;

public interface TransactionFactory {
    TransactionPrivate createTransaction();
}
@@ -0,0 +1,145 @@
package com.usatiuk.dhfs.objects.transaction;

import com.usatiuk.dhfs.objects.*;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.snapshot.SnapshotManager;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;

import javax.annotation.Nonnull;
import java.util.*;

@ApplicationScoped
public class TransactionFactoryImpl implements TransactionFactory {
    @Inject
    SnapshotManager snapshotManager;
    @Inject
    ReadTrackingObjectSourceFactory readTrackingObjectSourceFactory;

    @Override
    public TransactionPrivate createTransaction() {
        return new TransactionImpl();
    }

    private class TransactionImpl implements TransactionPrivate {
        private final ReadTrackingTransactionObjectSource _source;

        private final NavigableMap<JObjectKey, TxRecord.TxObjectRecord<?>> _writes = new TreeMap<>();

        private Map<JObjectKey, TxRecord.TxObjectRecord<?>> _newWrites = new HashMap<>();
        private final List<Runnable> _onCommit = new ArrayList<>();
        private final List<Runnable> _onFlush = new ArrayList<>();
        private final SnapshotManager.Snapshot _snapshot;

        private TransactionImpl() {
            _snapshot = snapshotManager.createSnapshot();
            _source = readTrackingObjectSourceFactory.create(_snapshot);
        }

        @Override
        public void onCommit(Runnable runnable) {
            _onCommit.add(runnable);
        }

        @Override
        public void onFlush(Runnable runnable) {
            _onFlush.add(runnable);
        }

        @Override
        public Collection<Runnable> getOnCommit() {
            return Collections.unmodifiableCollection(_onCommit);
        }

        @Override
        public SnapshotManager.Snapshot snapshot() {
            return _snapshot;
        }

        @Override
        public Collection<Runnable> getOnFlush() {
            return Collections.unmodifiableCollection(_onFlush);
        }

        @Override
        public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy) {
            switch (_writes.get(key)) {
                case TxRecord.TxObjectRecordWrite<?> write -> {
                    return Optional.of(type.cast(write.data()));
                }
                case TxRecord.TxObjectRecordDeleted deleted -> {
                    return Optional.empty();
                }
                case null, default -> {
                }
            }

            return switch (strategy) {
                case OPTIMISTIC -> _source.get(type, key);
                case WRITE -> _source.getWriteLocked(type, key);
            };
        }

        @Override
        public void delete(JObjectKey key) {
            var got = _writes.get(key);
            if (got instanceof TxRecord.TxObjectRecordDeleted) {
                return;
            }

            _writes.put(key, new TxRecord.TxObjectRecordDeleted(key));
            _newWrites.put(key, new TxRecord.TxObjectRecordDeleted(key));
        }

        @Nonnull
        @Override
        public Collection<JObjectKey> findAllObjects() {
//            return store.findAllObjects();
            return List.of();
        }

        @Override
        public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
            Log.tracev("Getting tx iterator with start={0}, key={1}", start, key);
            return new TombstoneMergingKvIterator<>("tx", start, key,
                    (tS, tK) -> new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, tS, tK), t -> switch (t) {
                        case TxRecord.TxObjectRecordWrite<?> write -> new Data<>(write.data());
                        case TxRecord.TxObjectRecordDeleted deleted -> new Tombstone<>();
                        case null, default -> null;
                    }),
                    (tS, tK) -> new MappingKvIterator<>(_source.getIterator(tS, tK), Data::new));
        }

        @Override
        public void put(JData obj) {
            _writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
            _newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
        }

        @Override
        public Collection<TxRecord.TxObjectRecord<?>> drainNewWrites() {
            var ret = _newWrites;
            _newWrites = new HashMap<>();
            return ret.values();
        }

        @Override
        public Map<JObjectKey, TransactionObject<?>> reads() {
            return _source.getRead();
        }

        @Override
        public ReadTrackingTransactionObjectSource readSource() {
            return _source;
        }

        @Override
        public void close() {
            _source.close();
            _snapshot.close();
        }
    }
}
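A sketch of the read-your-writes behaviour implemented by get() above: the transaction's own pending records in _writes always take precedence over the snapshot (the key is hypothetical):

txm.run(() -> {
    curTx.put(new Parent(JObjectKey.of("k"), "John")); // recorded as TxObjectRecordWrite
    curTx.delete(JObjectKey.of("k"));                  // replaced by TxObjectRecordDeleted
    // get() consults _writes first, so the snapshot is never even asked:
    Assertions.assertTrue(curTx.get(Parent.class, JObjectKey.of("k")).isEmpty());
});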
@@ -0,0 +1,5 @@
package com.usatiuk.dhfs.objects.transaction;

public interface TransactionHandle {
    void onFlush(Runnable runnable);
}
@@ -0,0 +1,7 @@
package com.usatiuk.dhfs.objects.transaction;

import java.util.Collection;

public interface TransactionHandlePrivate extends TransactionHandle {
    Collection<Runnable> getOnFlush();
}
@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects.transaction;

import com.usatiuk.dhfs.objects.JData;
import com.usatiuk.dhfs.objects.JDataVersionedWrapper;

import java.util.Optional;

public interface TransactionObject<T extends JData> {
    Optional<JDataVersionedWrapper> data();
}
@@ -0,0 +1,21 @@
package com.usatiuk.dhfs.objects.transaction;

import com.usatiuk.dhfs.objects.JObjectKey;
import com.usatiuk.dhfs.objects.snapshot.SnapshotManager;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;

import java.util.Collection;
import java.util.Map;

// The transaction interface used by the commit machinery: exposes the accumulated writes and reads
public interface TransactionPrivate extends Transaction, TransactionHandlePrivate, AutoCloseableNoThrow {
    Collection<TxRecord.TxObjectRecord<?>> drainNewWrites();

    Map<JObjectKey, TransactionObject<?>> reads();

    ReadTrackingTransactionObjectSource readSource();

    Collection<Runnable> getOnCommit();

    SnapshotManager.Snapshot snapshot();
}
@@ -0,0 +1,20 @@
package com.usatiuk.dhfs.objects.transaction;

import com.usatiuk.dhfs.objects.JData;
import com.usatiuk.dhfs.objects.JObjectKey;

public class TxRecord {
    public interface TxObjectRecord<T> {
        JObjectKey key();
    }

    public record TxObjectRecordWrite<T extends JData>(JData data) implements TxObjectRecord<T> {
        @Override
        public JObjectKey key() {
            return data.key();
        }
    }

    public record TxObjectRecordDeleted(JObjectKey key) implements TxObjectRecord<JData> {
    }
}
@@ -0,0 +1,8 @@
dhfs.objects.persistence=lmdb
dhfs.objects.writeback.limit=134217728
dhfs.objects.lru.limit=134217728
dhfs.objects.lru.print-stats=true
dhfs.objects.lock_timeout_secs=15
dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs
quarkus.package.jar.decompiler.enabled=true
dhfs.objects.persistence.snapshot-extra-checks=false
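These are ordinary Quarkus (MicroProfile Config) keys, so they can be overridden per profile; the test profiles later in this change do exactly that for the snapshot self-checks:

// From ObjectsTestProfileExtraChecks below:
toPut.put("dhfs.objects.persistence.snapshot-extra-checks", "true");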
@@ -0,0 +1,79 @@
package com.usatiuk.dhfs.objects;

import org.junit.jupiter.api.Assertions;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public abstract class Just {
    public static void run(Callable<?> callable) {
        new Thread(() -> {
            try {
                callable.call();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }).start();
    }

    public static void runAll(Callable<?>... callables) {
        try {
            try (var exs = Executors.newFixedThreadPool(callables.length)) {
                exs.invokeAll(Arrays.stream(callables).map(c -> (Callable<?>) () -> {
                    try {
                        return c.call();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }).toList()).forEach(f -> {
                    try {
                        f.get();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                });
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public static void runAll(Runnable... callables) {
        try {
            try (var exs = Executors.newFixedThreadPool(callables.length)) {
                exs.invokeAll(Arrays.stream(callables).map(c -> (Callable<?>) () -> {
                    try {
                        c.run();
                        return null;
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }).toList()).forEach(f -> {
                    try {
                        f.get();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                });
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public static <K> void checkIterator(Iterator<K> it, List<K> expected) {
        for (var e : expected) {
            Assertions.assertTrue(it.hasNext());
            var next = it.next();
            Assertions.assertEquals(e, next);
        }
    }

    @SafeVarargs
    public static <K> void checkIterator(Iterator<K> it, K... expected) {
        checkIterator(it, Arrays.asList(expected));
    }
}
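A usage sketch for this helper: runAll blocks until every task finishes on a fixed-size pool and rethrows the first failure in task order, while checkIterator asserts the exact sequence an iterator yields (the Log.info bodies are placeholders):

Just.runAll(
        () -> { Log.info("writer task"); return null; },  // Callable<?> overload
        () -> { Log.info("reader task"); return null; });
Just.checkIterator(List.of(1, 2, 3).iterator(), 1, 2, 3);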
@@ -0,0 +1,154 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.pcollections.TreePMap;

import java.util.List;

public class KeyPredicateKvIteratorTest {

    @Test
    public void simpleTest() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6);
        var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.GT, 3),
                IteratorStart.GE, 3, v -> (v % 2 == 0));
        var expected = List.of(Pair.of(6, 6));
        for (var pair : expected) {
            Assertions.assertTrue(pit.hasNext());
            Assertions.assertEquals(pair, pit.next());
        }
    }

    @Test
    public void ltTest() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6);
        var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0));
        var expected = List.of(Pair.of(6, 6));
        for (var pair : expected) {
            Assertions.assertTrue(pit.hasNext());
            Assertions.assertEquals(pair, pit.next());
        }
        Assertions.assertFalse(pit.hasNext());
    }

    @Test
    public void ltTest2() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6);
        var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 3),
                IteratorStart.LT, 2, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
                IteratorStart.LT, 4, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 5),
                IteratorStart.LE, 5, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6));
        Assertions.assertFalse(pit.hasNext());
    }

    @Test
    public void ltTest3() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6).plus(7, 7).plus(8, 8);
        var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7),
                IteratorStart.LT, 7, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 8),
                IteratorStart.LT, 8, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 6),
                IteratorStart.LE, 6, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0));
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertFalse(pit.hasPrev());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertFalse(pit.hasPrev());
        Assertions.assertEquals(Pair.of(6, 6), pit.next());
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(8, pit.peekNextKey());
        Assertions.assertEquals(6, pit.peekPrevKey());
        Assertions.assertEquals(8, pit.peekNextKey());
        Assertions.assertEquals(6, pit.peekPrevKey());
    }

    @Test
    public void itTest4() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6).plus(8, 8).plus(10, 10);
        var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7),
                IteratorStart.LT, 7, v -> (v % 2 == 0));
        Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0));
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertFalse(pit.hasPrev());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertEquals(Pair.of(6, 6), pit.next());
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(8, pit.peekNextKey());
        Assertions.assertEquals(6, pit.peekPrevKey());
        Assertions.assertEquals(8, pit.peekNextKey());
        Assertions.assertEquals(6, pit.peekPrevKey());
    }

//    @Test
//    public void reverseTest() {
//        var source1 = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6);
//        var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
//                IteratorStart.LT, 4, v -> (v % 2 == 0));
//
//    }
}
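KeyPredicateKvIterator exposes only the keys that satisfy the predicate, and the start bound is then interpreted within that filtered view; this is why the LT cases above still begin at 6. Restating ltTest compactly:

var src = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6);
var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(src, IteratorStart.LT, 5),
        IteratorStart.LT, 5, v -> (v % 2 == 0));
// The even-key view contains only 6; with no even key below 5, iteration starts at 6.
Just.checkIterator(pit, Pair.of(6, 6));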
@@ -0,0 +1,348 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.pcollections.TreePMap;

import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

public class MergingKvIteratorTest {

    private class SimpleIteratorWrapper<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
        private final Iterator<Pair<K, V>> _iterator;
        private Pair<K, V> _next;

        public SimpleIteratorWrapper(Iterator<Pair<K, V>> iterator) {
            _iterator = iterator;
            fillNext();
        }

        private void fillNext() {
            while (_iterator.hasNext() && _next == null) {
                _next = _iterator.next();
            }
        }

        @Override
        public K peekNextKey() {
            if (_next == null) {
                throw new NoSuchElementException();
            }
            return _next.getKey();
        }

        @Override
        public void skip() {
            if (_next == null) {
                throw new NoSuchElementException();
            }
            _next = null;
            fillNext();
        }

        @Override
        public K peekPrevKey() {
            throw new UnsupportedOperationException();
        }

        @Override
        public Pair<K, V> prev() {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean hasPrev() {
            throw new UnsupportedOperationException();
        }

        @Override
        public void skipPrev() {
            throw new UnsupportedOperationException();
        }

        @Override
        public void close() {
        }

        @Override
        public boolean hasNext() {
            return _next != null;
        }

        @Override
        public Pair<K, V> next() {
            if (_next == null) {
                throw new NoSuchElementException("No more elements");
            }
            var ret = _next;
            _next = null;
            fillNext();
            return ret;
        }
    }

    @Test
    public void testTestIterator() {
        var list = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6));
        var iterator = new SimpleIteratorWrapper<>(list.iterator());
        var realIterator = list.iterator();
        while (realIterator.hasNext()) {
            Assertions.assertTrue(iterator.hasNext());
            Assertions.assertEquals(realIterator.next(), iterator.next());
        }
        Assertions.assertFalse(iterator.hasNext());

        var emptyList = List.<Pair<Integer, Integer>>of();
        var emptyIterator = new SimpleIteratorWrapper<>(emptyList.iterator());
        Assertions.assertFalse(emptyIterator.hasNext());
    }

    @Test
    public void testSimple() {
        var source1 = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6)).iterator();
        var source2 = List.of(Pair.of(2, 3), Pair.of(4, 5), Pair.of(6, 7)).iterator();
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1), (a, b) -> new SimpleIteratorWrapper<>(source2));
        var expected = List.of(Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4), Pair.of(4, 5), Pair.of(5, 6), Pair.of(6, 7));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
    }

    @Test
    public void testPriority() {
        var source1 = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6));
        var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7));
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()));
        var expected = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());

        var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()));
        var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7));
        for (var pair : expected2) {
            Assertions.assertTrue(mergingIterator2.hasNext());
            Assertions.assertEquals(pair, mergingIterator2.next());
        }
        Assertions.assertFalse(mergingIterator2.hasNext());
    }

    @Test
    public void testPriority2() {
        var source1 = List.of(Pair.of(2, 4), Pair.of(5, 6));
        var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5));
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()));
        var expected = List.of(Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());

        var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()));
        var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6));
        for (var pair : expected2) {
            Assertions.assertTrue(mergingIterator2.hasNext());
            Assertions.assertEquals(pair, mergingIterator2.next());
        }
        Assertions.assertFalse(mergingIterator2.hasNext());
    }

    @Test
    public void testPriorityLe() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
        var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5);
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        var expected = List.of(Pair.of(5, 6));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());
        Just.checkIterator(mergingIterator.reversed(), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3));
        Assertions.assertFalse(mergingIterator.reversed().hasNext());
        Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6));
        Assertions.assertFalse(mergingIterator.hasNext());

        var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
        var expected2 = List.of(Pair.of(5, 6));
        for (var pair : expected2) {
            Assertions.assertTrue(mergingIterator2.hasNext());
            Assertions.assertEquals(pair, mergingIterator2.next());
        }
        Assertions.assertFalse(mergingIterator2.hasNext());
        Just.checkIterator(mergingIterator2.reversed(), Pair.of(5, 6), Pair.of(2, 5), Pair.of(1, 3));
        Assertions.assertFalse(mergingIterator2.reversed().hasNext());
        Just.checkIterator(mergingIterator2, Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6));
        Assertions.assertFalse(mergingIterator2.hasNext());

        var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        Assertions.assertEquals(5, mergingIterator3.peekNextKey());
        Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
        Assertions.assertEquals(5, mergingIterator3.peekNextKey());
        Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
    }

    @Test
    public void testPriorityLe2() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
        var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        var expected = List.of(Pair.of(5, 6));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());
    }

    @Test
    public void testPriorityLe3() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
        var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(6, 8);
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        var expected = List.of(Pair.of(5, 6), Pair.of(6, 8));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());
        Just.checkIterator(mergingIterator.reversed(), Pair.of(6, 8), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3));
        Assertions.assertFalse(mergingIterator.reversed().hasNext());
        Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6), Pair.of(6, 8));
        Assertions.assertFalse(mergingIterator.hasNext());

        var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
        var expected2 = List.of(Pair.of(5, 6), Pair.of(6, 8));
        for (var pair : expected2) {
            Assertions.assertTrue(mergingIterator2.hasNext());
            Assertions.assertEquals(pair, mergingIterator2.next());
        }
        Assertions.assertFalse(mergingIterator2.hasNext());

        var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        Assertions.assertEquals(5, mergingIterator3.peekNextKey());
        Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
        Assertions.assertEquals(5, mergingIterator3.peekNextKey());
        Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
        Assertions.assertTrue(mergingIterator3.hasPrev());
        Assertions.assertTrue(mergingIterator3.hasNext());
        Assertions.assertEquals(5, mergingIterator3.peekNextKey());
    }

    @Test
    public void testPriorityLe4() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(6, 7);
        var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        var expected = List.of(Pair.of(3, 4), Pair.of(6, 7));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());

        var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
        var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7));
        for (var pair : expected2) {
            Assertions.assertTrue(mergingIterator2.hasNext());
            Assertions.assertEquals(pair, mergingIterator2.next());
        }
        Assertions.assertFalse(mergingIterator2.hasNext());
    }

    @Test
    public void testPriorityLe5() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 2).plus(6, 7);
        var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        var expected = List.of(Pair.of(3, 4), Pair.of(6, 7));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());

        var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
        var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7));
        for (var pair : expected2) {
            Assertions.assertTrue(mergingIterator2.hasNext());
            Assertions.assertEquals(pair, mergingIterator2.next());
        }
        Assertions.assertFalse(mergingIterator2.hasNext());
    }

    @Test
    public void testPriorityLe6() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
        var source2 = TreePMap.<Integer, Integer>empty().plus(4, 6);
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        var expected = List.of(Pair.of(4, 6));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());

        var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
        var expected2 = List.of(Pair.of(4, 6));
        for (var pair : expected2) {
            Assertions.assertTrue(mergingIterator2.hasNext());
            Assertions.assertEquals(pair, mergingIterator2.next());
        }
        Assertions.assertFalse(mergingIterator2.hasNext());
    }

    @Test
    public void testPriorityLe7() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
        var source2 = TreePMap.<Integer, Integer>empty().plus(1, 4).plus(3, 5).plus(4, 6);
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        var expected = List.of(Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());
        Just.checkIterator(mergingIterator.reversed(), Pair.of(4, 6), Pair.of(3, 5), Pair.of(1, 3));
        Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6));

        var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
        var expected2 = List.of(Pair.of(1, 4), Pair.of(3, 5), Pair.of(4, 6));
        for (var pair : expected2) {
            Assertions.assertTrue(mergingIterator2.hasNext());
            Assertions.assertEquals(pair, mergingIterator2.next());
        }
        Assertions.assertFalse(mergingIterator2.hasNext());
    }

    @Test
    public void testPriorityLt() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
        var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5);
        var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
        var expected = List.of(Pair.of(2, 4), Pair.of(5, 6));
        for (var pair : expected) {
            Assertions.assertTrue(mergingIterator.hasNext());
            Assertions.assertEquals(pair, mergingIterator.next());
        }
        Assertions.assertFalse(mergingIterator.hasNext());

        var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
        var expected2 = List.of(Pair.of(2, 5), Pair.of(5, 6));
        for (var pair : expected2) {
            Assertions.assertTrue(mergingIterator2.hasNext());
            Assertions.assertEquals(pair, mergingIterator2.next());
        }
        Assertions.assertFalse(mergingIterator2.hasNext());
    }
}
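The testPriority cases above pin down the tie-breaking rule: when several sources contain the same key, the source listed first wins. A compact restatement of that rule (maps made up for illustration):

var a = TreePMap.<Integer, Integer>empty().plus(1, 10);
var b = TreePMap.<Integer, Integer>empty().plus(1, 20);
var it = new MergingKvIterator<>("doc", IteratorStart.GE, 0,
        (s, k) -> new NavigableMapKvIterator<>(a, s, k),
        (s, k) -> new NavigableMapKvIterator<>(b, s, k));
Just.checkIterator(it, Pair.of(1, 10)); // the value from the first-listed source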
@@ -0,0 +1,71 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.pcollections.TreePMap;

import java.util.NavigableMap;

public class NavigableMapKvIteratorTest {
    private final NavigableMap<Integer, Integer> _testMap1 = TreePMap.<Integer, Integer>empty().plus(1, 2).plus(2, 3).plus(3, 4);

    @Test
    void test1() {
        var iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 3);
        Just.checkIterator(iterator, Pair.of(3, 4));
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 2);
        Just.checkIterator(iterator, Pair.of(2, 3), Pair.of(3, 4));
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GE, 2);
        Just.checkIterator(iterator, Pair.of(2, 3), Pair.of(3, 4));
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GT, 2);
        Just.checkIterator(iterator, Pair.of(3, 4));
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LT, 3);
        Just.checkIterator(iterator, Pair.of(2, 3), Pair.of(3, 4));
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LT, 2);
        Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4));
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LT, 1);
        Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4));
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 1);
        Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4));
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GT, 3);
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GT, 4);
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 0);
        Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4));
        Assertions.assertFalse(iterator.hasNext());

        iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GE, 2);
        Assertions.assertTrue(iterator.hasNext());
        Assertions.assertEquals(2, iterator.peekNextKey());
        Assertions.assertEquals(1, iterator.peekPrevKey());
        Assertions.assertEquals(2, iterator.peekNextKey());
        Assertions.assertEquals(1, iterator.peekPrevKey());
        Just.checkIterator(iterator.reversed(), Pair.of(1, 2));
        Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4));
        Assertions.assertEquals(Pair.of(3, 4), iterator.prev());
        Assertions.assertEquals(Pair.of(2, 3), iterator.prev());
        Assertions.assertEquals(Pair.of(2, 3), iterator.next());
    }

}
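The cases above pin down the IteratorStart semantics: GE/GT begin at the first key greater than (or equal to) the target, while LE/LT position on the greatest key less than (or equal to) the target, falling back to the map start if no such key exists, and then still iterate forward. For example, on a map without the target key:

var it = new NavigableMapKvIterator<>(
        TreePMap.<Integer, Integer>empty().plus(1, 2).plus(3, 4), IteratorStart.LE, 2);
// LE 2 positions on the greatest key <= 2, i.e. key 1, then iterates forward:
Just.checkIterator(it, Pair.of(1, 2), Pair.of(3, 4));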
@@ -0,0 +1,9 @@
package com.usatiuk.dhfs.objects;

import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

@QuarkusTest
@TestProfile(Profiles.ObjectsTestProfileExtraChecks.class)
public class ObjectsTestExtraChecks extends ObjectsTestImpl {
}
@@ -0,0 +1,941 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.data.Parent;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.transaction.LockingStrategy;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import io.quarkus.logging.Log;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.RepeatedTest;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

class Profiles {
    public static class ObjectsTestProfileExtraChecks extends TempDataProfile {
        @Override
        protected void getConfigOverrides(Map<String, String> toPut) {
            toPut.put("dhfs.objects.persistence.snapshot-extra-checks", "true");
        }
    }

    public static class ObjectsTestProfileNoExtraChecks extends TempDataProfile {
        @Override
        protected void getConfigOverrides(Map<String, String> toPut) {
            toPut.put("dhfs.objects.persistence.snapshot-extra-checks", "false");
        }
    }
}

public abstract class ObjectsTestImpl {
    @Inject
    TransactionManager txm;

    @Inject
    Transaction curTx;

    private void deleteAndCheck(JObjectKey key) {
        txm.run(() -> {
            curTx.delete(key);
        });

        txm.run(() -> {
            var parent = curTx.get(JData.class, key).orElse(null);
            Assertions.assertNull(parent);
        });
    }

    @Test
    void createObject() {
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("ParentCreate"), "John");
            curTx.put(newParent);
        });

        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate")).orElse(null);
            Assertions.assertEquals("John", parent.name());
        });
    }

    @Test
    void createGetObject() {
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("ParentCreateGet"), "John");
            curTx.put(newParent);
            var parent = curTx.get(Parent.class, JObjectKey.of("ParentCreateGet")).orElse(null);
            Assertions.assertEquals("John", parent.name());
        });

        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("ParentCreateGet")).orElse(null);
            Assertions.assertEquals("John", parent.name());
        });
    }

    @RepeatedTest(100)
    void createDeleteObject() {
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("ParentCreateDeleteObject"), "John");
            curTx.put(newParent);
        });

        txm.run(() -> {
            var parent = curTx.get(Parent.class, JObjectKey.of("ParentCreateDeleteObject")).orElse(null);
            Assertions.assertEquals("John", parent.name());
        });

        txm.run(() -> {
            curTx.delete(new JObjectKey("ParentCreateDeleteObject"));
        });

        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("ParentCreateDeleteObject")).orElse(null);
            Assertions.assertNull(parent);
        });
    }

    @Test
    void createCreateObject() {
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("Parent7"), "John");
            curTx.put(newParent);
        });
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("Parent7"), "John2");
            curTx.put(newParent);
        });
        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("Parent7")).orElse(null);
            Assertions.assertEquals("John2", parent.name());
        });
    }

    @Test
    void editObject() {
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("Parent3"), "John");
            curTx.put(newParent);
        });

        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.OPTIMISTIC).orElse(null);
            Assertions.assertEquals("John", parent.name());
            curTx.put(parent.withName("John2"));
        });
        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.WRITE).orElse(null);
            Assertions.assertEquals("John2", parent.name());
            curTx.put(parent.withName("John3"));
        });
        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("Parent3")).orElse(null);
            Assertions.assertEquals("John3", parent.name());
        });
    }

    @Test
    @Disabled
    void createObjectConflict() {
        AtomicBoolean thread1Failed = new AtomicBoolean(true);
        AtomicBoolean thread2Failed = new AtomicBoolean(true);

        var barrier = new CyclicBarrier(2);
        var latch = new CountDownLatch(2);

        Just.run(() -> {
            try {
                Log.warn("Thread 1");
                txm.runTries(() -> {
                    try {
                        barrier.await();
                    } catch (Throwable e) {
                        throw new RuntimeException(e);
                    }
                    var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null);
                    var newParent = new Parent(JObjectKey.of("Parent2"), "John");
                    curTx.put(newParent);
                    Log.warn("Thread 1 commit");
                }, 0);
                thread1Failed.set(false);
                return null;
            } finally {
                latch.countDown();
            }
        });
        Just.run(() -> {
            try {
                Log.warn("Thread 2");
                txm.runTries(() -> {
                    try {
                        barrier.await();
                    } catch (Throwable e) {
                        throw new RuntimeException(e);
                    }
                    var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null);
                    var newParent = new Parent(JObjectKey.of("Parent2"), "John2");
                    curTx.put(newParent);
                    Log.warn("Thread 2 commit");
                }, 0);
                thread2Failed.set(false);
                return null;
            } finally {
                latch.countDown();
            }
        });

        try {
            latch.await();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }

        var got = txm.run(() -> {
            return curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null);
        });

        if (!thread1Failed.get()) {
            Assertions.assertTrue(thread2Failed.get());
            Assertions.assertEquals("John", got.name());
        } else if (!thread2Failed.get()) {
            Assertions.assertEquals("John2", got.name());
        } else {
            Assertions.fail("No thread succeeded");
        }
    }
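The conflict tests below drive txm.runTries directly; a minimal sketch of the pattern (the second argument appears to be the remaining retry budget, which these tests set to 0 so that a serialization conflict propagates to the caller instead of being retried; the key is hypothetical):

txm.runTries(() -> {
    var p = curTx.get(Parent.class, JObjectKey.of("some-key"), LockingStrategy.OPTIMISTIC).orElseThrow();
    curTx.put(p.withName("updated"));
}, 0); // no automatic retries: a conflicting concurrent commit surfaces as an exception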
@ParameterizedTest
|
||||
@EnumSource(LockingStrategy.class)
|
||||
void editConflict(LockingStrategy strategy) {
|
||||
String key = "Parent4" + strategy.name();
|
||||
txm.run(() -> {
|
||||
var newParent = new Parent(JObjectKey.of(key), "John3");
|
||||
curTx.put(newParent);
|
||||
});
|
||||
|
||||
AtomicBoolean thread1Failed = new AtomicBoolean(true);
|
||||
AtomicBoolean thread2Failed = new AtomicBoolean(true);
|
||||
|
||||
var barrier = new CyclicBarrier(2);
|
||||
var latchEnd = new CountDownLatch(2);
|
||||
|
||||
Just.run(() -> {
|
||||
try {
|
||||
Log.warn("Thread 1");
|
||||
txm.runTries(() -> {
|
||||
try {
|
||||
barrier.await();
|
||||
} catch (Throwable e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null);
|
||||
curTx.put(parent.withName("John"));
|
||||
Log.warn("Thread 1 commit");
|
||||
}, 0);
|
||||
Log.warn("Thread 1 commit done");
|
||||
thread1Failed.set(false);
|
||||
return null;
|
||||
} finally {
|
||||
latchEnd.countDown();
|
||||
}
|
||||
});
|
||||
Just.run(() -> {
|
||||
try {
|
||||
Log.warn("Thread 2");
|
||||
barrier.await(); // Ensure thread 2 tx id is larger than thread 1
|
||||
txm.runTries(() -> {
|
||||
var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null);
|
||||
curTx.put(parent.withName("John2"));
|
||||
Log.warn("Thread 2 commit");
|
||||
}, 0);
|
||||
Log.warn("Thread 2 commit done");
|
||||
thread2Failed.set(false);
|
||||
return null;
|
||||
} finally {
|
||||
latchEnd.countDown();
|
||||
}
|
||||
});
|
||||
|
||||
try {
|
||||
latchEnd.await();
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
var got = txm.run(() -> {
|
||||
return curTx.get(Parent.class, new JObjectKey(key)).orElse(null);
|
||||
});
|
||||
|
||||
if (!thread1Failed.get() && !thread2Failed.get()) {
|
||||
Assertions.assertTrue(got.name().equals("John") || got.name().equals("John2"));
|
||||
return;
|
||||
}
|
||||
|
||||
Assertions.assertFalse(!thread1Failed.get() && !thread2Failed.get());
|
||||
|
||||
if (!thread1Failed.get()) {
|
||||
if (!thread2Failed.get()) {
|
||||
Assertions.assertEquals("John2", got.name());
|
||||
} else {
|
||||
Assertions.assertEquals("John", got.name());
|
||||
}
|
||||
} else {
|
||||
Assertions.assertFalse(thread2Failed.get());
|
||||
Assertions.assertEquals("John2", got.name());
|
||||
}
|
||||
}
|
||||
|
||||

    @ParameterizedTest
    @EnumSource(LockingStrategy.class)
    void editConflict2(LockingStrategy strategy) {
        String key = "EditConflict2" + strategy.name();
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of(key), "John3");
            curTx.put(newParent);
        });

        AtomicBoolean thread1Failed = new AtomicBoolean(true);
        AtomicBoolean thread2Failed = new AtomicBoolean(true);

        var barrier = new CyclicBarrier(2);
        var latchEnd = new CountDownLatch(2);

        Just.run(() -> {
            try {
                Log.warn("Thread 1");
                txm.runTries(() -> {
                    try {
                        barrier.await();
                    } catch (Throwable e) {
                        throw new RuntimeException(e);
                    }
                    var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null);
                    curTx.put(parent.withName("John"));
                    Log.warn("Thread 1 commit");
                }, 0);
                Log.warn("Thread 1 commit done");
                thread1Failed.set(false);
                return null;
            } finally {
                latchEnd.countDown();
            }
        });
        Just.run(() -> {
            try {
                Log.warn("Thread 2");
                txm.runTries(() -> {
                    // Ensure they will conflict
                    try {
                        barrier.await();
                    } catch (Throwable e) {
                        throw new RuntimeException(e);
                    }
                    var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null);
                    curTx.put(parent.withName("John2"));
                    Log.warn("Thread 2 commit");
                }, 0);
                Log.warn("Thread 2 commit done");
                thread2Failed.set(false);
                return null;
            } finally {
                latchEnd.countDown();
            }
        });

        try {
            latchEnd.await();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }

        var got = txm.run(() -> {
            return curTx.get(Parent.class, new JObjectKey(key)).orElse(null);
        });

        // Unlike editConflict, both transactions are guaranteed to overlap
        // here, so at most one of them may commit.
        Assertions.assertFalse(!thread1Failed.get() && !thread2Failed.get());

        if (!thread1Failed.get()) {
            if (!thread2Failed.get()) {
                Assertions.assertEquals("John2", got.name());
            } else {
                Assertions.assertEquals("John", got.name());
            }
        } else {
            Assertions.assertFalse(thread2Failed.get());
            Assertions.assertEquals("John2", got.name());
        }
    }
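
    // Both conflict tests are parameterized over LockingStrategy. The assumed
    // split (not spelled out in this file): WRITE locks the object for
    // writing already at get() time, so the second writer blocks and then
    // fails on its stale read, while OPTIMISTIC lets both proceed and
    // validates object versions at commit. The assertions above only rely on
    // the common guarantee that at most one of two overlapping writers
    // commits.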

    @RepeatedTest(100)
    void snapshotTest1() {
        var key = "SnapshotTest1";
        var barrier1 = new CyclicBarrier(2);
        var barrier2 = new CyclicBarrier(2);
        try (ExecutorService ex = Executors.newFixedThreadPool(3)) {
            ex.invokeAll(List.of(
                    () -> {
                        barrier1.await();
                        Log.info("Thread 2 starting tx");
                        txm.run(() -> {
                            Log.info("Thread 2 started tx");
                            curTx.put(new Parent(JObjectKey.of(key), "John"));
                            Log.info("Thread 2 committing");
                        });
                        Log.info("Thread 2 committed");
                        try {
                            barrier2.await();
                        } catch (Throwable e) {
                            throw new RuntimeException(e);
                        }
                        return null;
                    },
                    () -> {
                        Log.info("Thread 1 starting tx");
                        txm.run(() -> {
                            try {
                                Log.info("Thread 1 started tx");
                                barrier1.await();
                                barrier2.await();
                            } catch (Throwable e) {
                                throw new RuntimeException(e);
                            }
                            Log.info("Thread 1 reading");
                            Assertions.assertTrue(curTx.get(Parent.class, new JObjectKey(key)).isEmpty());
                            Log.info("Thread 1 done reading");
                        });
                        Log.info("Thread 1 finished");
                        return null;
                    }
            )).forEach(f -> {
                try {
                    f.get();
                } catch (Throwable e) {
                    throw new RuntimeException(e);
                }
            });
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        txm.run(() -> {
            Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
        });
        deleteAndCheck(new JObjectKey(key));
    }
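
    // The barrier choreography pins the ordering: thread 1's transaction is
    // already open before thread 2 writes "John", so thread 1's read is
    // served from the snapshot taken at transaction start and must come up
    // empty, even though the write has committed by the time the read runs.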

    @RepeatedTest(100)
    void snapshotTest2() {
        var key = "SnapshotTest2";
        var barrier1 = new CyclicBarrier(2);
        var barrier2 = new CyclicBarrier(2);
        txm.run(() -> {
            curTx.put(new Parent(JObjectKey.of(key), "John"));
        });
        try (ExecutorService ex = Executors.newFixedThreadPool(3)) {
            ex.invokeAll(List.of(
                    () -> {
                        barrier1.await();
                        Log.info("Thread 2 starting tx");
                        txm.run(() -> {
                            Log.info("Thread 2 started tx");
                            curTx.put(new Parent(JObjectKey.of(key), "John2"));
                            Log.info("Thread 2 committing");
                        });
                        Log.info("Thread 2 committed");
                        try {
                            barrier2.await();
                        } catch (Throwable e) {
                            throw new RuntimeException(e);
                        }
                        return null;
                    },
                    () -> {
                        Log.info("Thread 1 starting tx");
                        txm.run(() -> {
                            try {
                                Log.info("Thread 1 started tx");
                                barrier1.await();
                                barrier2.await();
                            } catch (Throwable e) {
                                throw new RuntimeException(e);
                            }
                            Log.info("Thread 1 reading");
                            Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
                            Log.info("Thread 1 done reading");
                        });
                        Log.info("Thread 1 finished");
                        return null;
                    }
            )).forEach(f -> {
                try {
                    f.get();
                } catch (Throwable e) {
                    throw new RuntimeException(e);
                }
            });
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        txm.run(() -> {
            Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
        });
        deleteAndCheck(new JObjectKey(key));
    }

    @RepeatedTest(100)
    void snapshotTest3() {
        var key = "SnapshotTest3";
        var barrier0 = new CountDownLatch(1);
        var barrier1 = new CyclicBarrier(2);
        var barrier2 = new CyclicBarrier(2);
        txm.run(() -> {
            curTx.put(new Parent(JObjectKey.of(key), "John"));
        }).onFlush(barrier0::countDown);
        try {
            barrier0.await();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        try (ExecutorService ex = Executors.newFixedThreadPool(3)) {
            ex.invokeAll(List.of(
                    () -> {
                        barrier1.await();
                        Log.info("Thread 2 starting tx");
                        txm.run(() -> {
                            Log.info("Thread 2 started tx");
                            curTx.put(new Parent(JObjectKey.of(key), "John2"));
                            Log.info("Thread 2 committing");
                        });
                        Log.info("Thread 2 committed");
                        try {
                            barrier2.await();
                        } catch (Throwable e) {
                            throw new RuntimeException(e);
                        }
                        return null;
                    },
                    () -> {
                        Log.info("Thread 1 starting tx");
                        txm.run(() -> {
                            try {
                                Log.info("Thread 1 started tx");
                                barrier1.await();
                                barrier2.await();
                            } catch (Throwable e) {
                                throw new RuntimeException(e);
                            }
                            Log.info("Thread 1 reading");
                            Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
                            Log.info("Thread 1 done reading");
                        });
                        Log.info("Thread 1 finished");
                        return null;
                    }
            )).forEach(f -> {
                try {
                    f.get();
                } catch (Throwable e) {
                    throw new RuntimeException(e);
                }
            });
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        txm.run(() -> {
            Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
        });
        deleteAndCheck(new JObjectKey(key));
    }
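
    // snapshotTest3 differs from snapshotTest2 only in the barrier0 latch:
    // onFlush() registers a callback that apparently fires once the committed
    // transaction has reached storage, so the initial "John" is durable
    // before either worker starts. Thread 1 still reading "John" shows the
    // snapshot is taken against the flushed state, not just an in-memory
    // write set.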

    @RepeatedTest(100)
    void simpleIterator1() {
        var key = "SimpleIterator1";
        var key1 = key + "_1";
        var key2 = key + "_2";
        var key3 = key + "_3";
        var key4 = key + "_4";
        txm.run(() -> {
            curTx.put(new Parent(JObjectKey.of(key), "John"));
            curTx.put(new Parent(JObjectKey.of(key1), "John1"));
            curTx.put(new Parent(JObjectKey.of(key2), "John2"));
            curTx.put(new Parent(JObjectKey.of(key3), "John3"));
            curTx.put(new Parent(JObjectKey.of(key4), "John4"));
        });
        txm.run(() -> {
            var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key));
            var got = iter.next();
            Assertions.assertEquals(key1, got.getKey().name());
            got = iter.next();
            Assertions.assertEquals(key2, got.getKey().name());
            got = iter.next();
            Assertions.assertEquals(key3, got.getKey().name());
            got = iter.next();
            Assertions.assertEquals(key4, got.getKey().name());
            iter.close();
        });
    }

    @RepeatedTest(100)
    void simpleIterator2() {
        var key = "SimpleIterator2";
        var key1 = key + "_1";
        var key2 = key + "_2";
        var key3 = key + "_3";
        var key4 = key + "_4";
        txm.run(() -> {
            curTx.put(new Parent(JObjectKey.of(key), "John"));
            curTx.put(new Parent(JObjectKey.of(key1), "John1"));
            curTx.put(new Parent(JObjectKey.of(key2), "John2"));
            curTx.put(new Parent(JObjectKey.of(key3), "John3"));
            curTx.put(new Parent(JObjectKey.of(key4), "John4"));
        });
        txm.run(() -> {
            try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                var got = iter.next();
                Assertions.assertEquals(key1, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key2, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key3, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key4, got.getKey().name());
            }
        });
        txm.run(() -> {
            try (var iter = curTx.getIterator(IteratorStart.LT, new JObjectKey(key + "_5"))) {
                var got = iter.next();
                Assertions.assertEquals(key4, got.getKey().name());
                Assertions.assertTrue(iter.hasPrev());
                got = iter.prev();
                Assertions.assertEquals(key4, got.getKey().name());
                Assertions.assertTrue(iter.hasNext());
                got = iter.next();
                Assertions.assertEquals(key4, got.getKey().name());
            }
        });
        txm.run(() -> {
            curTx.delete(new JObjectKey(key));
            curTx.delete(new JObjectKey(key1));
            curTx.delete(new JObjectKey(key2));
            curTx.delete(new JObjectKey(key3));
            curTx.delete(new JObjectKey(key4));
        });
        txm.run(() -> {
            try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key));
            }
        });
    }
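
    // IteratorStart pins where a fresh iterator is positioned relative to the
    // requested key; the assumed mapping, by analogy with
    // java.util.NavigableMap navigation (a sketch, not the storage engine's
    // implementation):
    //   GE -> first key >= k (ceiling)    GT -> first key > k (higher)
    //   LE -> last key <= k (floor)       LT -> last key < k (lower)
    // This is why GT(key) lands on key1 above, while LT(key + "_5") lands on
    // key4, from which prev()/next() retrace the same entry around the
    // cursor.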

    @RepeatedTest(100)
    void concurrentIterator1() {
        var key = "ConcurrentIterator1";
        var key1 = key + "_1";
        var key2 = key + "_2";
        var key3 = key + "_3";
        var key4 = key + "_4";
        txm.run(() -> {
            curTx.put(new Parent(JObjectKey.of(key), "John"));
            curTx.put(new Parent(JObjectKey.of(key1), "John1"));
            curTx.put(new Parent(JObjectKey.of(key4), "John4"));
        });
        var barrier = new CyclicBarrier(2);
        var barrier2 = new CyclicBarrier(2);
        Just.runAll(() -> {
            barrier.await();
            txm.run(() -> {
                Log.info("Thread 1 starting tx");
                try {
                    barrier2.await();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                curTx.put(new Parent(JObjectKey.of(key2), "John2"));
                curTx.put(new Parent(JObjectKey.of(key3), "John3"));
                Log.info("Thread 1 committing");
            });
            Log.info("Thread 1 committed");
            return null;
        }, () -> {
            txm.run(() -> {
                Log.info("Thread 2 starting tx");
                try {
                    barrier.await();
                    barrier2.await();
                    try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                        var got = iter.next();
                        Assertions.assertEquals(key1, got.getKey().name());
                        got = iter.next();
                        Assertions.assertEquals(key4, got.getKey().name());
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            Log.info("Thread 2 finished");
            return null;
        });
        Log.info("All threads finished");
        txm.run(() -> {
            try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                var got = iter.next();
                Assertions.assertEquals(key1, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key2, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key3, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key4, got.getKey().name());
            }
        });
        txm.run(() -> {
            curTx.delete(new JObjectKey(key));
            curTx.delete(new JObjectKey(key1));
            curTx.delete(new JObjectKey(key2));
            curTx.delete(new JObjectKey(key3));
            curTx.delete(new JObjectKey(key4));
        });
        txm.run(() -> {
            try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key));
            }
        });
    }
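
    // The iterator above runs inside thread 2's transaction, whose snapshot
    // predates thread 1's commit (thread 1 only writes after barrier2, when
    // thread 2 is already inside its transaction). key2 and key3 therefore
    // must not show up in thread 2's view even though they are committed by
    // the time the iterator is drained; the follow-up transaction then sees
    // all four keys.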

    @RepeatedTest(100)
    void concurrentIterator2() {
        var key = "ConcurrentIterator2";
        var key1 = key + "_1";
        var key2 = key + "_2";
        var key3 = key + "_3";
        var key4 = key + "_4";
        txm.run(() -> {
            curTx.put(new Parent(JObjectKey.of(key), "John"));
            curTx.put(new Parent(JObjectKey.of(key1), "John1"));
            curTx.put(new Parent(JObjectKey.of(key2), "John2"));
            curTx.put(new Parent(JObjectKey.of(key4), "John4"));
        });
        var barrier = new CyclicBarrier(2);
        var barrier2 = new CyclicBarrier(2);
        Just.runAll(() -> {
            barrier.await();
            txm.run(() -> {
                Log.info("Thread 1 starting tx");
                try {
                    barrier2.await();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                curTx.put(new Parent(JObjectKey.of(key2), "John5"));
                curTx.put(new Parent(JObjectKey.of(key3), "John3"));
                Log.info("Thread 1 committing");
            });
            Log.info("Thread 1 committed");
            return null;
        }, () -> {
            txm.run(() -> {
                Log.info("Thread 2 starting tx");
                try {
                    barrier.await();
                    barrier2.await();
                    try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                        var got = iter.next();
                        Assertions.assertEquals(key1, got.getKey().name());
                        got = iter.next();
                        Assertions.assertEquals(key2, got.getKey().name());
                        Assertions.assertEquals("John2", ((Parent) got.getValue()).name());
                        got = iter.next();
                        Assertions.assertEquals(key4, got.getKey().name());
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            Log.info("Thread 2 finished");
            return null;
        });
        Log.info("All threads finished");
        txm.run(() -> {
            try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                var got = iter.next();
                Assertions.assertEquals(key1, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key2, got.getKey().name());
                Assertions.assertEquals("John5", ((Parent) got.getValue()).name());
                got = iter.next();
                Assertions.assertEquals(key3, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key4, got.getKey().name());
            }
        });
        txm.run(() -> {
            curTx.delete(new JObjectKey(key));
            curTx.delete(new JObjectKey(key1));
            curTx.delete(new JObjectKey(key2));
            curTx.delete(new JObjectKey(key3));
            curTx.delete(new JObjectKey(key4));
        });
        txm.run(() -> {
            try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key));
            }
        });
    }

    @RepeatedTest(100)
    void concurrentIterator3() {
        var key = "ConcurrentIterator3";
        var key1 = key + "_1";
        var key2 = key + "_2";
        var key3 = key + "_3";
        var key4 = key + "_4";
        txm.run(() -> {
            curTx.put(new Parent(JObjectKey.of(key), "John"));
            curTx.put(new Parent(JObjectKey.of(key1), "John1"));
            curTx.put(new Parent(JObjectKey.of(key2), "John2"));
            curTx.put(new Parent(JObjectKey.of(key4), "John4"));
        });
        var barrier = new CyclicBarrier(2);
        var barrier2 = new CyclicBarrier(2);
        Just.runAll(() -> {
            barrier.await();
            txm.run(() -> {
                Log.info("Thread 1 starting tx");
                try {
                    barrier2.await();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                curTx.put(new Parent(JObjectKey.of(key3), "John3"));
                curTx.delete(new JObjectKey(key2));
                Log.info("Thread 1 committing");
            });
            Log.info("Thread 1 committed");
            return null;
        }, () -> {
            txm.run(() -> {
                Log.info("Thread 2 starting tx");
                try {
                    barrier.await();
                    barrier2.await();
                    try (var iter = curTx.getIterator(IteratorStart.LE, new JObjectKey(key3))) {
                        var got = iter.next();
                        Assertions.assertEquals(key2, got.getKey().name());
                        Assertions.assertEquals("John2", ((Parent) got.getValue()).name());
                        Assertions.assertTrue(iter.hasNext());
                        Assertions.assertTrue(iter.hasPrev());
                        got = iter.next();
                        Assertions.assertEquals(key4, got.getKey().name());
                        Assertions.assertTrue(iter.hasPrev());
                        got = iter.prev();
                        Assertions.assertEquals(key4, got.getKey().name());
                        Assertions.assertTrue(iter.hasPrev());
                        got = iter.prev();
                        Assertions.assertEquals("John2", ((Parent) got.getValue()).name());
                        Assertions.assertTrue(iter.hasPrev());
                        got = iter.prev();
                        Assertions.assertEquals(key1, got.getKey().name());
                        Assertions.assertTrue(iter.hasNext());
                        got = iter.next();
                        Assertions.assertEquals(key1, got.getKey().name());
                        got = iter.next();
                        Assertions.assertEquals(key2, got.getKey().name());
                        Assertions.assertEquals("John2", ((Parent) got.getValue()).name());
                        got = iter.next();
                        Assertions.assertEquals(key4, got.getKey().name());
                    }
                    try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                        var got = iter.next();
                        Assertions.assertEquals(key1, got.getKey().name());
                        got = iter.next();
                        Assertions.assertEquals(key2, got.getKey().name());
                        Assertions.assertEquals("John2", ((Parent) got.getValue()).name());
                        got = iter.next();
                        Assertions.assertEquals(key4, got.getKey().name());
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            Log.info("Thread 2 finished");
            return null;
        });
        Log.info("All threads finished");
        txm.run(() -> {
            try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                var got = iter.next();
                Assertions.assertEquals(key1, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key3, got.getKey().name());
                got = iter.next();
                Assertions.assertEquals(key4, got.getKey().name());
            }
        });
        txm.run(() -> {
            curTx.delete(new JObjectKey(key));
            curTx.delete(new JObjectKey(key1));
            curTx.delete(new JObjectKey(key3));
            curTx.delete(new JObjectKey(key4));
        });
        txm.run(() -> {
            try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
                Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key));
            }
        });
    }
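
    // concurrentIterator3 adds a concurrent delete: thread 1 removes key2 and
    // inserts key3 after thread 2's snapshot was taken. Within thread 2's
    // transaction the deleted key2 must stay visible with its old value
    // "John2" in both directions of iteration, while the follow-up
    // transaction sees key2 gone and key3 present.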

    @RepeatedTest(100)
    void allParallel() {
        Just.runAll(
                () -> createObject(),
                () -> createGetObject(),
                () -> createDeleteObject(),
                () -> createCreateObject(),
                () -> editConflict(LockingStrategy.WRITE),
                () -> editConflict(LockingStrategy.OPTIMISTIC),
                () -> editConflict2(LockingStrategy.WRITE),
                () -> editConflict2(LockingStrategy.OPTIMISTIC),
                () -> snapshotTest1(),
                () -> snapshotTest2(),
                () -> snapshotTest3(),
                () -> simpleIterator1(),
                () -> simpleIterator2(),
                () -> concurrentIterator1(),
                () -> concurrentIterator2(),
                () -> concurrentIterator3()
        );
    }
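
    // Every test above keys its objects with a unique per-test prefix, which
    // is what makes running them all at once against the same store safe;
    // allParallel is effectively a smoke test for cross-test isolation under
    // load.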

    // }
    //
    // @Test
    // void nestedCreate() {
    //     {
    //         var tx = _tx.beginTransaction();
    //         var parent = tx.getObject(new JObjectKey("Parent"), Parent.class);
    //         var kid = tx.getObject(new JObjectKey("Kid"), Kid.class);
    //         parent.setName("John");
    //         kid.setName("KidName");
    //         parent.setKidKey(kid.getKey());
    //         tx.commit();
    //     }
    //
    //     {
    //         var tx2 = _tx.beginTransaction();
    //         var parent = tx2.getObject(new JObjectKey("Parent"));
    //         Assertions.assertInstanceOf(Parent.class, parent);
    //         Assertions.assertEquals("John", ((Parent) parent).getName());
    //         Assertions.assertEquals("KidName", ((Parent) parent).getKid().getName());
    //     }
    // }

}
@@ -0,0 +1,9 @@
package com.usatiuk.dhfs.objects;

import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

@QuarkusTest
@TestProfile(Profiles.ObjectsTestProfileNoExtraChecks.class)
public class ObjectsTestNoExtraChecks extends ObjectsTestImpl {
}
@@ -0,0 +1,115 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.data.Parent;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import io.quarkus.test.junit.mockito.InjectSpy;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

@QuarkusTest
@TestProfile(TempDataProfile.class)
public class PreCommitTxHookTest {
    @Inject
    TransactionManager txm;

    @Inject
    Transaction curTx;
    @InjectSpy
    private DummyPreCommitTxHook spyHook;

    @Test
    void createObject() {
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("ParentCreate2"), "John");
            curTx.put(newParent);
        });

        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate2")).orElse(null);
            Assertions.assertEquals("John", parent.name());
        });

        ArgumentCaptor<JData> dataCaptor = ArgumentCaptor.forClass(JData.class);
        ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class);
        Mockito.verify(spyHook, Mockito.times(1)).onCreate(keyCaptor.capture(), dataCaptor.capture());
        Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).name());
        Assertions.assertEquals(new JObjectKey("ParentCreate2"), keyCaptor.getValue());
    }

    @Test
    void deleteObject() {
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("ParentDel"), "John");
            curTx.put(newParent);
        });

        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("ParentDel")).orElse(null);
            Assertions.assertEquals("John", parent.name());
        });

        txm.run(() -> {
            curTx.delete(new JObjectKey("ParentDel"));
        });

        ArgumentCaptor<JData> dataCaptor = ArgumentCaptor.forClass(JData.class);
        ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class);
        Mockito.verify(spyHook, Mockito.times(1)).onDelete(keyCaptor.capture(), dataCaptor.capture());
        Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).name());
        Assertions.assertEquals(new JObjectKey("ParentDel"), keyCaptor.getValue());
    }

    @Test
    void editObject() {
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("ParentEdit"), "John");
            curTx.put(newParent);
        });

        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("ParentEdit"), "John changed");
            curTx.put(newParent);
        });

        ArgumentCaptor<JData> dataCaptorOld = ArgumentCaptor.forClass(JData.class);
        ArgumentCaptor<JData> dataCaptorNew = ArgumentCaptor.forClass(JData.class);
        ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class);
        Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture());
        Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).name());
        Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).name());
        Assertions.assertEquals(new JObjectKey("ParentEdit"), keyCaptor.getValue());
    }

    @Test
    void editObjectWithGet() {
        txm.run(() -> {
            var newParent = new Parent(JObjectKey.of("ParentEdit2"), "John");
            curTx.put(newParent);
        });

        txm.run(() -> {
            var parent = curTx.get(Parent.class, new JObjectKey("ParentEdit2")).orElse(null);
            Assertions.assertEquals("John", parent.name());
            curTx.put(parent.withName("John changed"));
        });

        ArgumentCaptor<JData> dataCaptorOld = ArgumentCaptor.forClass(JData.class);
        ArgumentCaptor<JData> dataCaptorNew = ArgumentCaptor.forClass(JData.class);
        ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class);
        Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture());
        Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).name());
        Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).name());
        Assertions.assertEquals(new JObjectKey("ParentEdit2"), keyCaptor.getValue());
    }

    @ApplicationScoped
    public static class DummyPreCommitTxHook implements PreCommitTxHook {
    }
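
    // DummyPreCommitTxHook compiles as an empty class, which suggests
    // PreCommitTxHook ships default no-op callbacks. A sketch of a
    // non-trivial hook, with signatures inferred from the Mockito
    // verifications above (an assumption, not a documented contract):
    //
    //     @ApplicationScoped
    //     public static class AuditPreCommitTxHook implements PreCommitTxHook {
    //         @Override
    //         public void onCreate(JObjectKey key, JData cur) {
    //             Log.infov("created {0}", key);
    //         }
    //
    //         @Override
    //         public void onChange(JObjectKey key, JData old, JData cur) {
    //             Log.infov("changed {0}", key);
    //         }
    //
    //         @Override
    //         public void onDelete(JObjectKey key, JData cur) {
    //             Log.infov("deleted {0}", key);
    //         }
    //     }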
}
@@ -0,0 +1,161 @@
package com.usatiuk.dhfs.objects;

import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.pcollections.TreePMap;

import java.util.List;

public class PredicateKvIteratorTest {

    @Test
    public void simpleTest() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.GT, 1),
                IteratorStart.GE, 1, v -> (v % 2 == 0) ? v : null);
        var expected = List.of(Pair.of(4, 6));
        for (var pair : expected) {
            Assertions.assertTrue(pit.hasNext());
            Assertions.assertEquals(pair, pit.next());
        }
    }
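
    // The mapper v -> (v % 2 == 0) ? v : null turns odd values into null, and
    // PredicateKvIterator treats a null mapping as "skip this entry": of
    // (1, 3), (3, 5) and (4, 6) only (4, 6) survives, which is exactly what
    // simpleTest walks. The start mode is passed to the wrapper a second
    // time, presumably so the filtered view can re-anchor itself correctly
    // around skipped entries.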

    @Test
    public void ltTest() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
                IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
        var expected = List.of(Pair.of(4, 6));
        for (var pair : expected) {
            Assertions.assertTrue(pit.hasNext());
            Assertions.assertEquals(pair, pit.next());
        }
        Assertions.assertFalse(pit.hasNext());
    }

    @Test
    public void ltTest2() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 1),
                IteratorStart.LT, 1, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 2),
                IteratorStart.LT, 2, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
                IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 4),
                IteratorStart.LE, 4, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6));
        Assertions.assertFalse(pit.hasNext());
    }

    @Test
    public void ltTest3() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 7).plus(6, 8);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
                IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7),
                IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 8),
                IteratorStart.LT, 8, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 6),
                IteratorStart.LE, 6, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(4, pit.peekNextKey());
        Assertions.assertFalse(pit.hasPrev());
        Assertions.assertEquals(4, pit.peekNextKey());
        Assertions.assertFalse(pit.hasPrev());
        Assertions.assertEquals(Pair.of(4, 6), pit.next());
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertEquals(4, pit.peekPrevKey());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertEquals(4, pit.peekPrevKey());
    }

    @Test
    public void itTest4() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 8).plus(6, 10);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
                IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(5, 8), Pair.of(6, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7),
                IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(6, 10));
        Assertions.assertFalse(pit.hasNext());
        Assertions.assertTrue(pit.hasPrev());
        Assertions.assertEquals(6, pit.peekPrevKey());
        Assertions.assertEquals(Pair.of(6, 10), pit.prev());
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(6, pit.peekNextKey());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(5, pit.peekNextKey());
        Assertions.assertTrue(pit.hasPrev());
        Assertions.assertEquals(4, pit.peekPrevKey());
        Assertions.assertEquals(5, pit.peekNextKey());
        Assertions.assertEquals(4, pit.peekPrevKey());
        Assertions.assertEquals(Pair.of(5, 8), pit.next());
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertEquals(5, pit.peekPrevKey());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertEquals(5, pit.peekPrevKey());
    }

    // @Test
    // public void reverseTest() {
    //     var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
    //     var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
    //             IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
    //
    // }
}
@@ -0,0 +1,30 @@
package com.usatiuk.dhfs.objects;

import io.quarkus.test.junit.QuarkusTestProfile;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

public class TempDataProfile implements QuarkusTestProfile {
    protected void getConfigOverrides(Map<String, String> toPut) {
    }

    @Override
    public final Map<String, String> getConfigOverrides() {
        Path tempDirWithPrefix;
        try {
            tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        var ret = new HashMap<String, String>();
        ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
        ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
        ret.put("dhfs.objects.persistence", "lmdb");
        getConfigOverrides(ret);
        return ret;
    }
}
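// Subclasses tune extra settings through the protected getConfigOverrides(Map)
// hook while inheriting the per-run temp directories above. A sketch of a
// hypothetical subclass (the property name below is made up for
// illustration):
//
//     public class ExtraChecksProfile extends TempDataProfile {
//         @Override
//         protected void getConfigOverrides(Map<String, String> toPut) {
//             toPut.put("dhfs.objects.extra-checks", "true");
//         }
//     }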
@@ -0,0 +1,40 @@
package com.usatiuk.dhfs.objects;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;

@ApplicationScoped
public class TestDataCleaner {
    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
    String tempDirectory;

    void init(@Observes @Priority(1) StartupEvent event) throws IOException {
        try {
            purgeDirectory(Path.of(tempDirectory).toFile());
        } catch (Exception ignored) {
            Log.warn("Couldn't cleanup test data on init");
        }
    }

    void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
        purgeDirectory(Path.of(tempDirectory).toFile());
    }

    void purgeDirectory(File dir) {
        // Recursively delete children before removing each entry itself.
        for (File file : Objects.requireNonNull(dir.listFiles())) {
            if (file.isDirectory())
                purgeDirectory(file);
            file.delete();
        }
    }
}
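// Observer ordering note: CDI invokes event observers in ascending @Priority
// order, so @Priority(1) makes the startup purge run before other observers
// touch the data directory, and @Priority(1000000000) defers the shutdown
// purge until nearly everything else has stopped.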
@@ -0,0 +1,10 @@
package com.usatiuk.dhfs.objects.data;

import com.usatiuk.dhfs.objects.JData;
import com.usatiuk.dhfs.objects.JObjectKey;

public record Kid(JObjectKey key, String name) implements JData {
    public Kid withName(String name) {
        return new Kid(key, name);
    }
}
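// Like Parent, Kid is an immutable record: "mutation" is modeled as a wither
// returning a fresh copy, which fits the snapshotting transaction model
// exercised by the tests above.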