30 Commits

Author SHA1 Message Date
edf1ae85f5 rocksdb 2025-04-12 20:49:18 +02:00
b42461f188 Objects: fix cache reading twice 2025-04-12 20:15:59 +02:00
513cbd717d Server: remove "dhfs.objects" from proto files 2025-04-05 20:17:29 +02:00
075867daaa Server: less bad chunk merging 2025-04-05 17:50:43 +02:00
8e4ea67e53 Server: a little jmap cleanup 2025-04-04 17:26:12 +02:00
fb128882cb Server: use StringUtils.leftPad for JMapLongKey toString
much faster, without regex parsing every time!
2025-04-03 23:00:36 +02:00
cb8c50000a Objects: simplify merging iterator even more^2
initialMaxValue streams can be simplified too
2025-04-03 22:31:59 +02:00
4c5cbfb5bf Objects: simplify merging iterator even more
no need for the hashmap step of iterator construction
2025-04-03 22:23:23 +02:00
6bcec4a260 Objects: simplify merging iterator
remove the first match "optimization", as it doesn't really
matter with the separate read object methods
2025-04-03 22:13:34 +02:00
df00584367 Objects: getting begin/end iterators 2025-04-03 15:05:09 +02:00
ea4f041d6e Objects: remove outdated snapshot stuff 2025-04-03 13:02:55 +02:00
3c37638db2 Objects: less locks in writeback 2025-04-02 16:14:50 +02:00
0e12a59f23 Objects: a little lazier caching 2025-04-02 15:35:02 +02:00
735dd605d7 Objects: get rid of locks in lmdb store 2025-04-02 15:13:58 +02:00
194166109e Server: don't forget to resend invalidations in case of error 2025-04-01 13:10:11 +02:00
68111a0c4f Server: use formatter for logs in grpc server 2025-04-01 12:53:21 +02:00
b872c32a05 Server: more threads by default 2025-04-01 12:44:56 +02:00
0e14b1cd39 Server: more canDelete logs 2025-04-01 12:19:16 +02:00
17843952f2 Server: push multiple ops, really^2 2025-04-01 12:14:30 +02:00
ffef8959df Server: push multiple ops, really 2025-03-31 16:53:52 +02:00
cb909478dc Server: parallel op sending try lock 2025-03-31 16:35:13 +02:00
06335b4b99 Server: parallel op sending 2025-03-31 16:32:13 +02:00
8351bec59a Revert "Server: parallel op sending"
This reverts commit 0f8002dc2c.
2025-03-31 16:20:58 +02:00
29663f575d Server: try downloading from all if we have a dummy object
just speeds up sync a little
2025-03-31 16:06:17 +02:00
0f8002dc2c Server: parallel op sending 2025-03-31 16:05:09 +02:00
5c50d572d0 Server: no delay op sending 2025-03-31 16:05:00 +02:00
edebb6d8f0 Server: don't try downloading non-remote-objects 2025-03-31 16:04:31 +02:00
5d620c64c5 Server: fix utimens on directories (ignore it!) 2025-03-31 15:28:49 +02:00
b998871e7f Objects: make cache status print debug 2025-03-30 22:08:00 +02:00
69eb96b10c API draft to manually set peer addresses 2025-03-30 16:41:31 +02:00
73 changed files with 4381 additions and 3904 deletions

View File: pom.xml

@@ -69,6 +69,11 @@
         <artifactId>lmdbjava</artifactId>
         <version>0.9.1</version>
     </dependency>
+    <dependency>
+        <groupId>org.rocksdb</groupId>
+        <artifactId>rocksdbjni</artifactId>
+        <version>9.10.0</version>
+    </dependency>
     <dependency>
         <groupId>org.apache.commons</groupId>
         <artifactId>commons-collections4</artifactId>
View File: JObjectKey.java

@@ -1,53 +1,47 @@
 package com.usatiuk.objects;
-import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
 import java.io.Serializable;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.UUID;
-public record JObjectKey(String name) implements Serializable, Comparable<JObjectKey> {
-    public static JObjectKey of(String name) {
-        return new JObjectKey(name);
+public sealed interface JObjectKey extends Serializable, Comparable<JObjectKey> permits JObjectKeyImpl, JObjectKeyMax, JObjectKeyMin {
+    JObjectKeyMin MIN = new JObjectKeyMin();
+    JObjectKeyMax MAX = new JObjectKeyMax();
+    static JObjectKey of(String name) {
+        return new JObjectKeyImpl(name);
     }
-    public static JObjectKey random() {
-        return new JObjectKey(UUID.randomUUID().toString());
+    static JObjectKey random() {
+        return new JObjectKeyImpl(UUID.randomUUID().toString());
     }
-    public static JObjectKey first() {
-        return new JObjectKey("");
+    static JObjectKey first() {
+        return MIN;
     }
-    public static JObjectKey fromBytes(byte[] bytes) {
-        return new JObjectKey(new String(bytes, StandardCharsets.UTF_8));
+    static JObjectKey last() {
+        return MAX;
     }
-    public static JObjectKey fromByteBuffer(ByteBuffer buff) {
-        return new JObjectKey(StandardCharsets.UTF_8.decode(buff).toString());
+    static JObjectKey fromBytes(byte[] bytes) {
+        return new JObjectKeyImpl(new String(bytes, StandardCharsets.UTF_8));
     }
+    static JObjectKey fromByteBuffer(ByteBuffer buff) {
+        return new JObjectKeyImpl(StandardCharsets.UTF_8.decode(buff).toString());
+    }
     @Override
-    public int compareTo(JObjectKey o) {
-        return name.compareTo(o.name);
-    }
+    int compareTo(JObjectKey o);
     @Override
-    public String toString() {
-        return name;
-    }
+    String toString();
-    public byte[] bytes() {
-        return name.getBytes(StandardCharsets.UTF_8);
-    }
+    byte[] bytes();
-    public ByteBuffer toByteBuffer() {
-        var heapBb = StandardCharsets.UTF_8.encode(name);
-        if (heapBb.isDirect()) return heapBb;
-        var directBb = UninitializedByteBuffer.allocateUninitialized(heapBb.remaining());
-        directBb.put(heapBb);
-        directBb.flip();
-        return directBb;
-    }
+    ByteBuffer toByteBuffer();
+    String name();
 }

View File: JObjectKeyImpl.java

@@ -0,0 +1,43 @@
package com.usatiuk.objects;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
public record JObjectKeyImpl(String name) implements JObjectKey {
@Override
public int compareTo(JObjectKey o) {
switch (o) {
case JObjectKeyImpl jObjectKeyImpl -> {
return name.compareTo(jObjectKeyImpl.name());
}
case JObjectKeyMax jObjectKeyMax -> {
return -1;
}
case JObjectKeyMin jObjectKeyMin -> {
return 1;
}
}
}
@Override
public String toString() {
return name;
}
@Override
public byte[] bytes() {
return name.getBytes(StandardCharsets.UTF_8);
}
@Override
public ByteBuffer toByteBuffer() {
var heapBb = StandardCharsets.UTF_8.encode(name);
if (heapBb.isDirect()) return heapBb;
var directBb = UninitializedByteBuffer.allocateUninitialized(heapBb.remaining());
directBb.put(heapBb);
directBb.flip();
return directBb;
}
}

View File: JObjectKeyMax.java

@@ -0,0 +1,35 @@
package com.usatiuk.objects;
import java.nio.ByteBuffer;
public record JObjectKeyMax() implements JObjectKey {
@Override
public int compareTo(JObjectKey o) {
switch (o) {
case JObjectKeyImpl jObjectKeyImpl -> {
return 1;
}
case JObjectKeyMax jObjectKeyMax -> {
return 0;
}
case JObjectKeyMin jObjectKeyMin -> {
return 1;
}
}
}
@Override
public byte[] bytes() {
throw new UnsupportedOperationException();
}
@Override
public ByteBuffer toByteBuffer() {
throw new UnsupportedOperationException();
}
@Override
public String name() {
throw new UnsupportedOperationException();
}
}

View File: JObjectKeyMin.java

@@ -0,0 +1,35 @@
package com.usatiuk.objects;
import java.nio.ByteBuffer;
public record JObjectKeyMin() implements JObjectKey {
@Override
public int compareTo(JObjectKey o) {
switch (o) {
case JObjectKeyImpl jObjectKeyImpl -> {
return -1;
}
case JObjectKeyMax jObjectKeyMax -> {
return -1;
}
case JObjectKeyMin jObjectKeyMin -> {
return 0;
}
}
}
@Override
public byte[] bytes() {
throw new UnsupportedOperationException();
}
@Override
public ByteBuffer toByteBuffer() {
throw new UnsupportedOperationException();
}
@Override
public String name() {
throw new UnsupportedOperationException();
}
}
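
The three files above implement a sentinel-key pattern: JObjectKeyMin sorts below and JObjectKeyMax above every real key, so a full range scan no longer needs a magic empty-string key the way the old JObjectKey.first() did. A minimal self-contained sketch of the same idea (simplified, hypothetical names, not the project's exact types):

// Sentinel keys demo: MIN is below and MAX is above every real key, so
// range scans need no special-cased "" or "\uffff" strings.
import java.util.TreeMap;

public class SentinelKeyDemo {
    sealed interface Key extends Comparable<Key> permits Real, Min, Max {}

    record Min() implements Key {
        public int compareTo(Key o) { return o instanceof Min ? 0 : -1; } // below everything else
    }

    record Max() implements Key {
        public int compareTo(Key o) { return o instanceof Max ? 0 : 1; } // above everything else
    }

    record Real(String name) implements Key {
        public int compareTo(Key o) {
            return switch (o) {
                case Real r -> name.compareTo(r.name());
                case Min m -> 1;  // any real key is above MIN
                case Max m -> -1; // and below MAX
            };
        }
    }

    public static void main(String[] args) {
        var map = new TreeMap<Key, Integer>();
        map.put(new Real("a"), 1);
        map.put(new Real("b"), 2);
        // A full scan is just the open range (MIN, MAX):
        System.out.println(map.subMap(new Min(), false, new Max(), false));
        // {Real[name=a]=1, Real[name=b]=2}
    }
}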

View File: MergingKvIterator.java

@@ -4,71 +4,56 @@ import io.quarkus.logging.Log;
 import org.apache.commons.lang3.tuple.Pair;
 import java.util.*;
-import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
     private final NavigableMap<K, CloseableKvIterator<K, V>> _sortedIterators = new TreeMap<>();
     private final String _name;
-    private final IteratorStart _initialStartType;
-    private final K _initialStartKey;
-    private final List<IterProdFn<K, V>> _pendingIterators;
-    private Map<CloseableKvIterator<K, V>, Integer> _iterators;
-    // Fast path for the first element
-    private FirstMatchState<K, V> _firstMatchState;
+    private final Map<CloseableKvIterator<K, V>, Integer> _iterators;
     public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
         _goingForward = true;
         _name = name;
-        _initialStartType = startType;
-        _initialStartKey = startKey;
-        {
-            int counter = 0;
-            var iteratorsTmp = new HashMap<CloseableKvIterator<K, V>, Integer>();
-            for (var iteratorFn : iterators) {
-                var iterator = iteratorFn.get(startType, startKey);
-                if ((counter == 0) // Not really a requirement but simplifies some things for now
-                        && (startType == IteratorStart.GE || startType == IteratorStart.LE)
-                        && iterator.hasNext()
-                        && iterator.peekNextKey().equals(startKey)) {
-                    _firstMatchState = new FirstMatchFound<>(iterator);
-                    _pendingIterators = iterators;
-                    Log.tracev("{0} Created fast match: {1}", _name, _firstMatchState);
-                    return;
-                }
-                iteratorsTmp.put(iterator, counter++);
-            }
-            _iterators = Map.copyOf(iteratorsTmp);
-            _pendingIterators = null;
-        }
-        _firstMatchState = new FirstMatchNone<>();
-        doInitialAdvance();
-    }
-    @SafeVarargs
-    public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
-        this(name, startType, startKey, List.of(iterators));
-    }
-    private void doInitialAdvance() {
-        if (_initialStartType == IteratorStart.LT || _initialStartType == IteratorStart.LE) {
+        _iterators = Map.ofEntries(
+                IntStream.range(0, iterators.size())
+                        .mapToObj(i -> Pair.of(iterators.get(i).get(startType, startKey), i))
+                        .toArray(Pair[]::new)
+        );
+        if (startType == IteratorStart.LT || startType == IteratorStart.LE) {
             // Starting at a greatest key less than/less or equal than:
             // We have a bunch of iterators that have given us theirs "greatest LT/LE key"
             // now we need to pick the greatest of those to start with
             // But if some of them don't have a lesser key, we need to pick the smallest of those
-            var found = _iterators.keySet().stream()
-                    .filter(CloseableKvIterator::hasNext)
-                    .map((i) -> {
-                        var peeked = i.peekNextKey();
-                        // Log.warnv("peeked: {0}, from {1}", peeked, i.getClass());
-                        return peeked;
-                    }).distinct().collect(Collectors.partitioningBy(e -> _initialStartType == IteratorStart.LE ? e.compareTo(_initialStartKey) <= 0 : e.compareTo(_initialStartKey) < 0));
+            K greatestLess = null;
+            K smallestMore = null;
+            for (var it : _iterators.keySet()) {
+                if (it.hasNext()) {
+                    var peeked = it.peekNextKey();
+                    if (startType == IteratorStart.LE ? peeked.compareTo(startKey) <= 0 : peeked.compareTo(startKey) < 0) {
+                        if (greatestLess == null || peeked.compareTo(greatestLess) > 0) {
+                            greatestLess = peeked;
+                        }
+                    } else {
+                        if (smallestMore == null || peeked.compareTo(smallestMore) < 0) {
+                            smallestMore = peeked;
+                        }
+                    }
+                }
+            }
             K initialMaxValue;
-            if (!found.get(true).isEmpty())
-                initialMaxValue = found.get(true).stream().max(Comparator.naturalOrder()).orElse(null);
+            if (greatestLess != null)
+                initialMaxValue = greatestLess;
             else
-                initialMaxValue = found.get(false).stream().min(Comparator.naturalOrder()).orElse(null);
+                initialMaxValue = smallestMore;
             if (initialMaxValue == null) {
                 // Empty iterators
             }
             for (var iterator : _iterators.keySet()) {
                 while (iterator.hasNext() && iterator.peekNextKey().compareTo(initialMaxValue) < 0) {
@@ -82,7 +67,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
         }
         Log.tracev("{0} Initialized: {1}", _name, _sortedIterators);
-        switch (_initialStartType) {
+        switch (startType) {
 //            case LT -> {
 //                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
 //            }
@@ -90,37 +75,17 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
 //                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
 //            }
             case GT -> {
-                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(_initialStartKey) > 0;
+                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) > 0;
             }
             case GE -> {
-                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(_initialStartKey) >= 0;
+                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) >= 0;
             }
         }
     }
-    private void doHydrate() {
-        if (_firstMatchState instanceof FirstMatchNone) {
-            return;
-        }
-        boolean consumed = _firstMatchState instanceof FirstMatchConsumed;
-        if (_firstMatchState instanceof FirstMatchFound(CloseableKvIterator iterator)) {
-            iterator.close();
-        }
-        _firstMatchState = new FirstMatchNone<>();
-        {
-            int counter = 0;
-            var iteratorsTmp = new HashMap<CloseableKvIterator<K, V>, Integer>();
-            for (var iteratorFn : _pendingIterators) {
-                var iterator = iteratorFn.get(consumed ? IteratorStart.GT : IteratorStart.GE, _initialStartKey);
-                iteratorsTmp.put(iterator, counter++);
-            }
-            _iterators = Map.copyOf(iteratorsTmp);
-        }
-        doInitialAdvance();
-    }
+    @SafeVarargs
+    public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
+        this(name, startType, startKey, List.of(iterators));
+    }
     private void advanceIterator(CloseableKvIterator<K, V> iterator) {
@@ -151,17 +116,6 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
     @Override
     protected void reverse() {
-        switch (_firstMatchState) {
-            case FirstMatchFound<K, V> firstMatchFound -> {
-                doHydrate();
-            }
-            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
-                doHydrate();
-            }
-            default -> {
-            }
-        }
         var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
         Log.tracev("{0} Reversing from {1}", _name, cur);
         _goingForward = !_goingForward;
@@ -185,18 +139,6 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
     @Override
     protected K peekImpl() {
-        switch (_firstMatchState) {
-            case FirstMatchFound<K, V> firstMatchFound -> {
-                return firstMatchFound.iterator.peekNextKey();
-            }
-            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
-                doHydrate();
-                break;
-            }
-            default -> {
-            }
-        }
         if (_sortedIterators.isEmpty())
             throw new NoSuchElementException();
         return _goingForward ? _sortedIterators.firstKey() : _sortedIterators.lastKey();
@@ -204,22 +146,6 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
     @Override
     protected void skipImpl() {
-        switch (_firstMatchState) {
-            case FirstMatchFound<K, V> firstMatchFound -> {
-                var curVal = firstMatchFound.iterator.next();
-                firstMatchFound.iterator.close();
-                _firstMatchState = new FirstMatchConsumed<>();
-                // Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, firstMatchFound.iterator, curVal, _sortedIterators.keySet());
-                return;
-            }
-            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
-                doHydrate();
-                break;
-            }
-            default -> {
-            }
-        }
         var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
         if (cur == null) {
             throw new NoSuchElementException();
@@ -231,38 +157,11 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
     @Override
     protected boolean hasImpl() {
-        switch (_firstMatchState) {
-            case FirstMatchFound<K, V> firstMatchFound -> {
-                return true;
-            }
-            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
-                doHydrate();
-                break;
-            }
-            default -> {
-            }
-        }
         return !_sortedIterators.isEmpty();
     }
     @Override
     protected Pair<K, V> nextImpl() {
-        switch (_firstMatchState) {
-            case FirstMatchFound<K, V> firstMatchFound -> {
-                var curVal = firstMatchFound.iterator.next();
-                firstMatchFound.iterator.close();
-                _firstMatchState = new FirstMatchConsumed<>();
-                // Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, firstMatchFound.iterator, curVal, _sortedIterators.keySet());
-                return curVal;
-            }
-            case FirstMatchConsumed<K, V> firstMatchConsumed -> {
-                doHydrate();
-                break;
-            }
-            default -> {
-            }
-        }
         var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
         if (cur == null) {
             throw new NoSuchElementException();
@@ -275,9 +174,6 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
     @Override
     public void close() {
-        if (_firstMatchState instanceof FirstMatchFound(CloseableKvIterator iterator)) {
-            iterator.close();
-        }
         for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
             iterator.close();
         }

View File: SnapshotEntry.java

@@ -1,7 +0,0 @@
package com.usatiuk.objects.snapshot;
public interface SnapshotEntry {
long whenToRemove();
SnapshotEntry withWhenToRemove(long whenToRemove);
}

View File: SnapshotEntryDeleted.java

@@ -1,8 +0,0 @@
package com.usatiuk.objects.snapshot;
public record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry {
@Override
public SnapshotEntryDeleted withWhenToRemove(long whenToRemove) {
return new SnapshotEntryDeleted(whenToRemove);
}
}

View File: SnapshotEntryObject.java

@@ -1,10 +0,0 @@
package com.usatiuk.objects.snapshot;
import com.usatiuk.objects.JDataVersionedWrapper;
public record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry {
@Override
public SnapshotEntryObject withWhenToRemove(long whenToRemove) {
return new SnapshotEntryObject(data, whenToRemove);
}
}

View File: SnapshotKey.java

@@ -1,15 +0,0 @@
package com.usatiuk.objects.snapshot;
import com.usatiuk.objects.JObjectKey;
import javax.annotation.Nonnull;
import java.util.Comparator;
public record SnapshotKey(JObjectKey key, long version) implements Comparable<SnapshotKey> {
@Override
public int compareTo(@Nonnull SnapshotKey o) {
return Comparator.comparing(SnapshotKey::key)
.thenComparing(SnapshotKey::version)
.compare(this, o);
}
}

View File: SnapshotKvIterator.java

@@ -1,192 +0,0 @@
package com.usatiuk.objects.snapshot;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import io.quarkus.logging.Log;
import org.apache.commons.lang3.tuple.Pair;
import java.util.NavigableMap;
import java.util.NoSuchElementException;
import java.util.Optional;
// TODO: test me
public class SnapshotKvIterator extends ReversibleKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> {
private final NavigableMap<SnapshotKey, SnapshotEntry> _objects;
private final long _version;
private final CloseableKvIterator<SnapshotKey, SnapshotEntry> _backing;
private Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> _next = null;
public SnapshotKvIterator(NavigableMap<SnapshotKey, SnapshotEntry> objects, long version, IteratorStart start, JObjectKey startKey) {
_objects = objects;
_version = version;
_goingForward = true;
if (start == IteratorStart.LT || start == IteratorStart.GE)
_backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, Long.MIN_VALUE));
else if (start == IteratorStart.GT || start == IteratorStart.LE)
_backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, Long.MAX_VALUE));
else
throw new UnsupportedOperationException();
fill();
boolean shouldGoBack = false;
if (start == IteratorStart.LE) {
if (_next == null || _next.getKey().compareTo(startKey) > 0) {
shouldGoBack = true;
}
} else if (start == IteratorStart.LT) {
if (_next == null || _next.getKey().compareTo(startKey) >= 0) {
shouldGoBack = true;
}
}
if (shouldGoBack && _backing.hasPrev()) {
_goingForward = false;
_backing.skipPrev();
fill();
_goingForward = true;
_backing.skip();
fill();
}
switch (start) {
case LT -> {
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
}
case LE -> {
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
}
case GT -> {
assert _next == null || _next.getKey().compareTo(startKey) > 0;
}
case GE -> {
assert _next == null || _next.getKey().compareTo(startKey) >= 0;
}
}
}
private void fillPrev(JObjectKey ltKey) {
if (ltKey != null)
while (_backing.hasPrev() && _backing.peekPrevKey().key().equals(ltKey)) {
Log.tracev("Snapshot skipping prev: {0}", _backing.peekPrevKey());
_backing.skipPrev();
}
_next = null;
while (_backing.hasPrev() && _next == null) {
var prev = _backing.prev();
if (prev.getKey().version() <= _version && prev.getValue().whenToRemove() > _version) {
Log.tracev("Snapshot skipping prev: {0} (too new)", prev);
_next = switch (prev.getValue()) {
case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) ->
Pair.of(prev.getKey().key(), new Data<>(data));
case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(prev.getKey().key(), new Tombstone<>());
default -> throw new IllegalStateException("Unexpected value: " + prev.getValue());
};
}
}
if (_next != null) {
if (_next.getValue() instanceof Data<JDataVersionedWrapper>(
JDataVersionedWrapper value
)) {
assert value.version() <= _version;
}
}
}
private void fillNext() {
_next = null;
while (_backing.hasNext() && _next == null) {
var next = _backing.next();
var nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null;
while (nextNextKey != null && nextNextKey.key().equals(next.getKey().key()) && nextNextKey.version() <= _version) {
Log.tracev("Snapshot skipping next: {0} (too old)", next);
next = _backing.next();
nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null;
}
// next.getValue().whenToRemove() >=_id, read tx might have same snapshot id as some write tx
if (next.getKey().version() <= _version && next.getValue().whenToRemove() > _version) {
_next = switch (next.getValue()) {
case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) ->
Pair.of(next.getKey().key(), new Data<>(data));
case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(next.getKey().key(), new Tombstone<>());
default -> throw new IllegalStateException("Unexpected value: " + next.getValue());
};
}
if (_next != null) {
if (_next.getValue() instanceof Data<JDataVersionedWrapper>(
JDataVersionedWrapper value
)) {
assert value.version() <= _version;
}
}
}
}
private void fill() {
if (_goingForward)
fillNext();
else
fillPrev(Optional.ofNullable(_next).map(Pair::getKey).orElse(null));
}
@Override
protected void reverse() {
_goingForward = !_goingForward;
boolean wasAtEnd = _next == null;
if (_goingForward && !wasAtEnd)
_backing.skip();
else if (!_goingForward && !wasAtEnd)
_backing.skipPrev();
fill();
}
@Override
public JObjectKey peekImpl() {
if (_next == null)
throw new NoSuchElementException();
return _next.getKey();
}
@Override
public void skipImpl() {
if (_next == null)
throw new NoSuchElementException();
fill();
}
@Override
public void close() {
_backing.close();
}
@Override
public boolean hasImpl() {
return _next != null;
}
@Override
public Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> nextImpl() {
if (_next == null)
throw new NoSuchElementException("No more elements");
var ret = _next;
if (ret.getValue() instanceof Data<JDataVersionedWrapper>(
JDataVersionedWrapper value
)) {
assert value.version() <= _version;
}
fill();
Log.tracev("Read: {0}, next: {1}", ret, _next);
return ret;
}
}

View File: SnapshotManager.java

@@ -23,16 +23,6 @@ public class SnapshotManager {
     // This should not be called for the same objects concurrently
     public Consumer<Runnable> commitTx(Collection<TxRecord.TxObjectRecord<?>> writes) {
-        // TODO: FIXME:
-        synchronized (this) {
-            return writebackStore.commitTx(writes, (id, commit) -> {
-                commit.run();
-            });
-        }
-    }
-    @Nonnull
-    public Optional<JDataVersionedWrapper> readObjectDirect(JObjectKey name) {
-        return writebackStore.readObject(name);
+        return writebackStore.commitTx(writes);
     }
 }

View File: CachingObjectPersistentStore.java

@@ -3,54 +3,92 @@ package com.usatiuk.objects.stores;
 import com.usatiuk.objects.JDataVersionedWrapper;
 import com.usatiuk.objects.JObjectKey;
 import com.usatiuk.objects.iterators.*;
 import com.usatiuk.objects.snapshot.Snapshot;
-import com.usatiuk.objects.transaction.LockManager;
 import io.quarkus.logging.Log;
-import io.quarkus.runtime.Startup;
+import io.quarkus.runtime.StartupEvent;
+import jakarta.annotation.Priority;
 import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.enterprise.event.Observes;
 import jakarta.inject.Inject;
 import org.apache.commons.lang3.tuple.Pair;
 import org.eclipse.microprofile.config.inject.ConfigProperty;
 import org.pcollections.TreePMap;
 import javax.annotation.Nonnull;
-import java.util.LinkedHashMap;
 import java.util.Optional;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 @ApplicationScoped
 public class CachingObjectPersistentStore {
-    private final LinkedHashMap<JObjectKey, CacheEntry> _cache = new LinkedHashMap<>();
-    private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();
-    @Inject
-    LockManager lockManager;
     @Inject
     SerializingObjectPersistentStore delegate;
-    @ConfigProperty(name = "dhfs.objects.lru.limit")
-    long sizeLimit;
     @ConfigProperty(name = "dhfs.objects.lru.print-stats")
     boolean printStats;
-    private TreePMap<JObjectKey, CacheEntry> _sortedCache = TreePMap.empty();
-    private long _cacheVersion = 0;
-    private long _curSize = 0;
-    private long _evict = 0;
-    private ExecutorService _statusExecutor = null;
+    private record Cache(TreePMap<JObjectKey, CacheEntry> map,
+                         int size,
+                         long version,
+                         int sizeLimit) {
+        public Cache withPut(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
+            int objSize = obj.map(JDataVersionedWrapper::estimateSize).orElse(16);
+            int newSize = size() + objSize;
+            var entry = new CacheEntry(obj.<MaybeTombstone<JDataVersionedWrapper>>map(Data::new).orElse(new Tombstone<>()), objSize);
+            var old = map.get(key);
+            if (old != null)
+                newSize -= old.size();
+            TreePMap<JObjectKey, CacheEntry> newCache = map().plus(key, entry);
+            while (newSize > sizeLimit) {
+                var del = newCache.firstEntry();
+                newCache = newCache.minusFirstEntry();
+                newSize -= del.getValue().size();
+            }
+            return new Cache(
+                    newCache,
+                    newSize,
+                    version,
+                    sizeLimit
+            );
+        }
+        public Cache withVersion(long version) {
+            return new Cache(map, size, version, sizeLimit);
+        }
+    }
+    private final AtomicReference<Cache> _cache;
+    private ExecutorService _commitExecutor;
+    private ExecutorService _statusExecutor;
+    private AtomicLong _cached = new AtomicLong();
+    private AtomicLong _cacheTries = new AtomicLong();
+    public CachingObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.lru.limit") int sizeLimit) {
+        _cache = new AtomicReference<>(
+                new Cache(TreePMap.empty(), 0, -1, sizeLimit)
+        );
+    }
-    @Startup
-    void init() {
+    void init(@Observes @Priority(110) StartupEvent event) {
+        try (var s = delegate.getSnapshot()) {
+            _cache.set(_cache.get().withVersion(s.id()));
+        }
+        _commitExecutor = Executors.newSingleThreadExecutor();
         if (printStats) {
             _statusExecutor = Executors.newSingleThreadExecutor();
             _statusExecutor.submit(() -> {
                 try {
                     while (true) {
-                        Thread.sleep(10000);
-                        if (_curSize > 0)
-                            Log.info("Cache status: size=" + _curSize / 1024 / 1024 + "MB" + " evicted=" + _evict);
-                        _evict = 0;
+                        Log.infov("Cache status: size=" + _cache.get().size() / 1024 / 1024 + "MB" + " cache success ratio: " + (_cached.get() / (double) _cacheTries.get()));
+                        _cached.set(0);
+                        _cacheTries.set(0);
+                        Thread.sleep(1000);
                     }
                 } catch (InterruptedException ignored) {
                 }
@@ -58,237 +96,178 @@ public class CachingObjectPersistentStore {
             });
         }
     }
-    private void put(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
-        // Log.tracev("Adding {0} to cache: {1}", key, obj);
-        _lock.writeLock().lock();
-        try {
-            int size = obj.map(JDataVersionedWrapper::estimateSize).orElse(16);
-            _curSize += size;
-            var entry = new CacheEntry(obj.<MaybeTombstone<JDataVersionedWrapper>>map(Data::new).orElse(new Tombstone<>()), size);
-            var old = _cache.putLast(key, entry);
-            _sortedCache = _sortedCache.plus(key, entry);
-            if (old != null)
-                _curSize -= old.size();
-            while (_curSize >= sizeLimit) {
-                var del = _cache.pollFirstEntry();
-                _sortedCache = _sortedCache.minus(del.getKey());
-                _curSize -= del.getValue().size();
-                _evict++;
-            }
-        } finally {
-            _lock.writeLock().unlock();
-        }
-    }
-    @Nonnull
-    public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
-        _lock.readLock().lock();
-        try {
-            var got = _cache.get(name);
-            if (got != null) {
-                return got.object().opt();
-            }
-        } finally {
-            _lock.readLock().unlock();
-        }
-        // Global object lock, prevent putting the object into cache
-        // if its being written right now by another thread
-        try (var lock = lockManager.tryLockObject(name)) {
-            var got = delegate.readObject(name);
-            if (lock == null)
-                return got;
-            _lock.writeLock().lock();
-            try {
-                put(name, got);
-                // No need to increase cache version, the objects didn't change
-            } finally {
-                _lock.writeLock().unlock();
-            }
-            return got;
-        }
-    }
-    public void commitTx(TxManifestObj<? extends JDataVersionedWrapper> names, long txId) {
-        var serialized = delegate.prepareManifest(names);
-        Log.tracev("Committing: {0} writes, {1} deletes", names.written().size(), names.deleted().size());
-        // A little complicated locking to minimize write lock holding time
-        delegate.commitTx(serialized, txId, (commit) -> {
-            _lock.writeLock().lock();
-            try {
-                // Make the changes visible atomically both in cache and in the underlying store
-                for (var write : names.written()) {
-                    put(write.getLeft(), Optional.of(write.getRight()));
-                }
-                for (var del : names.deleted()) {
-                    put(del, Optional.empty());
-                }
-                ++_cacheVersion;
-                commit.run();
-            } finally {
-                _lock.writeLock().unlock();
-            }
-        });
-        Log.tracev("Committed: {0} writes, {1} deletes", names.written().size(), names.deleted().size());
+    public void commitTx(TxManifestObj<? extends JDataVersionedWrapper> objs, long txId) {
+        Log.tracev("Committing: {0} writes, {1} deletes", objs.written().size(), objs.deleted().size());
+        var cache = _cache.get();
+        var commitFuture = _commitExecutor.submit(() -> delegate.prepareTx(objs, txId).run());
+        for (var write : objs.written()) {
+            cache = cache.withPut(write.getLeft(), Optional.of(write.getRight()));
+        }
+        for (var del : objs.deleted()) {
+            cache = cache.withPut(del, Optional.empty());
+        }
+        cache = cache.withVersion(txId);
+        try {
+            commitFuture.get();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+        _cache.set(cache);
+        Log.tracev("Committed: {0} writes, {1} deletes", objs.written().size(), objs.deleted().size());
     }
     public Snapshot<JObjectKey, JDataVersionedWrapper> getSnapshot() {
-        TreePMap<JObjectKey, CacheEntry> curSortedCache;
-        Snapshot<JObjectKey, JDataVersionedWrapper> backing = null;
-        long cacheVersion;
-        try {
-            // Log.tracev("Getting cache snapshot");
-            // Decrease the lock time as much as possible
-            _lock.readLock().lock();
-            try {
-                curSortedCache = _sortedCache;
-                cacheVersion = _cacheVersion;
-                // TODO: Could this be done without lock?
-                backing = delegate.getSnapshot();
-            } finally {
-                _lock.readLock().unlock();
-            }
-            Snapshot<JObjectKey, JDataVersionedWrapper> finalBacking = backing;
-            return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
-                private final TreePMap<JObjectKey, CacheEntry> _curSortedCache = curSortedCache;
-                private final Snapshot<JObjectKey, JDataVersionedWrapper> _backing = finalBacking;
-                private final long _snapshotCacheVersion = cacheVersion;
-                private void maybeCache(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
-                    if (_snapshotCacheVersion != _cacheVersion)
-                        return;
-                    _lock.writeLock().lock();
-                    try {
-                        if (_snapshotCacheVersion != _cacheVersion) {
-                            // Log.tracev("Not caching: {0}", key);
-                        } else {
-                            // Log.tracev("Caching: {0}", key);
-                            put(key, obj);
-                        }
-                    } finally {
-                        _lock.writeLock().unlock();
-                    }
-                }
-                @Override
-                public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
-                    return new TombstoneMergingKvIterator<>("cache", start, key,
-                            (mS, mK)
-                                    -> new MappingKvIterator<>(
-                                    new NavigableMapKvIterator<>(_curSortedCache, mS, mK),
-                                    e -> {
-                                        // Log.tracev("Taken from cache: {0}", e);
-                                        return e.object();
-                                    }
-                            ),
-                            (mS, mK) -> new MappingKvIterator<>(new CachingKvIterator(_backing.getIterator(start, key)), Data::new));
-                }
-                @Nonnull
-                @Override
-                public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
-                    var cached = _curSortedCache.get(name);
-                    if (cached != null) {
-                        return switch (cached.object()) {
-                            case Data<JDataVersionedWrapper> data -> Optional.of(data.value());
-                            case Tombstone<JDataVersionedWrapper> tombstone -> {
-                                yield Optional.empty();
-                            }
-                            default -> throw new IllegalStateException("Unexpected value: " + cached.object());
-                        };
-                    }
-                    var read = _backing.readObject(name);
-                    maybeCache(name, read);
-                    return _backing.readObject(name);
-                }
-                @Override
-                public long id() {
-                    return _backing.id();
-                }
-                @Override
-                public void close() {
-                    _backing.close();
-                }
-                private class CachingKvIterator implements CloseableKvIterator<JObjectKey, JDataVersionedWrapper> {
-                    private final CloseableKvIterator<JObjectKey, JDataVersionedWrapper> _delegate;
-                    private CachingKvIterator(CloseableKvIterator<JObjectKey, JDataVersionedWrapper> delegate) {
-                        _delegate = delegate;
-                    }
-                    @Override
-                    public JObjectKey peekNextKey() {
-                        return _delegate.peekNextKey();
-                    }
-                    @Override
-                    public void skip() {
-                        _delegate.skip();
-                    }
-                    @Override
-                    public void close() {
-                        _delegate.close();
-                    }
-                    @Override
-                    public boolean hasNext() {
-                        return _delegate.hasNext();
-                    }
-                    @Override
-                    public JObjectKey peekPrevKey() {
-                        return _delegate.peekPrevKey();
-                    }
-                    @Override
-                    public Pair<JObjectKey, JDataVersionedWrapper> prev() {
-                        var prev = _delegate.prev();
-                        maybeCache(prev.getKey(), Optional.of(prev.getValue()));
-                        return prev;
-                    }
-                    @Override
-                    public boolean hasPrev() {
-                        return _delegate.hasPrev();
-                    }
-                    @Override
-                    public void skipPrev() {
-                        _delegate.skipPrev();
-                    }
-                    @Override
-                    public Pair<JObjectKey, JDataVersionedWrapper> next() {
-                        var next = _delegate.next();
-                        maybeCache(next.getKey(), Optional.of(next.getValue()));
-                        return next;
-                    }
-                }
-            };
-        } catch (Throwable ex) {
-            if (backing != null) {
-                backing.close();
-            }
-            throw ex;
-        }
-    }
+        while (true) {
+            var cache = _cache.get();
+            if (cache == null)
+                return delegate.getSnapshot();
+            Cache curCache = null;
+            Snapshot<JObjectKey, JDataVersionedWrapper> backing = null;
+            try {
+                curCache = _cache.get();
+                backing = delegate.getSnapshot();
+                if (curCache.version() != backing.id()) {
+                    backing.close();
+                    backing = null;
+                    continue;
+                }
+                Snapshot<JObjectKey, JDataVersionedWrapper> finalBacking = backing;
+                Cache finalCurCache = curCache;
+                return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
+                    private boolean _invalid = false;
+                    private final Cache _curCache = finalCurCache;
+                    private final Snapshot<JObjectKey, JDataVersionedWrapper> _backing = finalBacking;
+                    private void maybeCache(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
+                        _cacheTries.incrementAndGet();
+                        if (_invalid)
+                            return;
+                        var globalCache = _cache.get();
+                        if (globalCache.version() != _curCache.version()) {
+                            _invalid = true;
+                            return;
+                        }
+                        var newCache = globalCache.withPut(key, obj);
+                        if (_cache.compareAndSet(globalCache, newCache))
+                            _cached.incrementAndGet();
+                    }
+                    @Override
+                    public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
+                        return new TombstoneMergingKvIterator<>("cache", start, key,
+                                (mS, mK)
+                                        -> new MappingKvIterator<>(
+                                        new NavigableMapKvIterator<>(_curCache.map(), mS, mK),
+                                        e -> {
+                                            // Log.tracev("Taken from cache: {0}", e);
+                                            return e.object();
+                                        }
+                                ),
+                                (mS, mK) -> new MappingKvIterator<>(new CachingKvIterator(_backing.getIterator(start, key)), Data::new));
+                    }
+                    @Nonnull
+                    @Override
+                    public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
+                        var cached = _curCache.map().get(name);
+                        if (cached != null) {
+                            return switch (cached.object()) {
+                                case Data<JDataVersionedWrapper> data -> Optional.of(data.value());
+                                case Tombstone<JDataVersionedWrapper> tombstone -> {
+                                    yield Optional.empty();
+                                }
+                                default -> throw new IllegalStateException("Unexpected value: " + cached.object());
+                            };
+                        }
+                        var read = _backing.readObject(name);
+                        maybeCache(name, read);
+                        return read;
+                    }
+                    @Override
+                    public long id() {
+                        return _backing.id();
+                    }
+                    @Override
+                    public void close() {
+                        _backing.close();
+                    }
+                    private class CachingKvIterator implements CloseableKvIterator<JObjectKey, JDataVersionedWrapper> {
+                        private final CloseableKvIterator<JObjectKey, JDataVersionedWrapper> _delegate;
+                        private CachingKvIterator(CloseableKvIterator<JObjectKey, JDataVersionedWrapper> delegate) {
+                            _delegate = delegate;
+                        }
+                        @Override
+                        public JObjectKey peekNextKey() {
+                            return _delegate.peekNextKey();
+                        }
+                        @Override
+                        public void skip() {
+                            _delegate.skip();
+                        }
+                        @Override
+                        public void close() {
+                            _delegate.close();
+                        }
+                        @Override
+                        public boolean hasNext() {
+                            return _delegate.hasNext();
+                        }
+                        @Override
+                        public JObjectKey peekPrevKey() {
+                            return _delegate.peekPrevKey();
+                        }
+                        @Override
+                        public Pair<JObjectKey, JDataVersionedWrapper> prev() {
+                            var prev = _delegate.prev();
+                            maybeCache(prev.getKey(), Optional.of(prev.getValue()));
+                            return prev;
+                        }
+                        @Override
+                        public boolean hasPrev() {
+                            return _delegate.hasPrev();
+                        }
+                        @Override
+                        public void skipPrev() {
+                            _delegate.skipPrev();
+                        }
+                        @Override
+                        public Pair<JObjectKey, JDataVersionedWrapper> next() {
+                            var next = _delegate.next();
+                            maybeCache(next.getKey(), Optional.of(next.getValue()));
+                            return next;
+                        }
+                    }
+                };
+            } catch (Throwable ex) {
+                if (backing != null) {
+                    backing.close();
+                }
+                throw ex;
+            }
+        }
+    }
-    public long getLastTxId() {
-        return delegate.getLastCommitId();
-    }
-    private record CacheEntry(MaybeTombstone<JDataVersionedWrapper> object, long size) {
+    private record CacheEntry(MaybeTombstone<JDataVersionedWrapper> object, int size) {
     }
 }
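
The rewrite trades the ReentrantReadWriteLock and mutable LinkedHashMap for an immutable Cache value swapped through an AtomicReference: a commit publishes a whole new (map, size, version) in one reference store, and a snapshot's maybeCache gives up (marks itself _invalid) as soon as the global version has moved past its own, instead of blocking. A minimal sketch of that copy-on-write pattern using only the standard library (the real code uses org.pcollections.TreePMap, which shares structure instead of copying):

// Copy-on-write cache behind an AtomicReference, updated with compareAndSet.
import java.util.Collections;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicReference;

public class CowCacheDemo {
    record Cache(SortedMap<String, String> map, long version) {
        Cache withPut(String k, String v) {
            var copy = new TreeMap<>(map); // O(n) copy; a persistent map would share structure
            copy.put(k, v);
            return new Cache(Collections.unmodifiableSortedMap(copy), version);
        }
        Cache withVersion(long v) { return new Cache(map, v); }
    }

    private final AtomicReference<Cache> cache =
            new AtomicReference<>(new Cache(Collections.unmodifiableSortedMap(new TreeMap<>()), 0));

    // Opportunistic caching as in maybeCache(): give up rather than block
    // if another thread swapped the cache in the meantime.
    boolean tryCache(String key, String value, long expectedVersion) {
        Cache cur = cache.get();
        if (cur.version() != expectedVersion)
            return false; // the store moved on; this read may be stale
        return cache.compareAndSet(cur, cur.withPut(key, value));
    }

    // A commit publishes the new map and version in one atomic reference swap.
    void commit(String key, String value, long txId) {
        cache.set(cache.get().withPut(key, value).withVersion(txId));
    }
}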

View File: LmdbObjectPersistentStore.java

@@ -1,14 +1,16 @@
 package com.usatiuk.objects.stores;
 import com.google.protobuf.ByteString;
+import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
+import com.usatiuk.dhfs.utils.RefcountedCloseable;
 import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.objects.JObjectKeyMax;
+import com.usatiuk.objects.JObjectKeyMin;
 import com.usatiuk.objects.iterators.CloseableKvIterator;
 import com.usatiuk.objects.iterators.IteratorStart;
 import com.usatiuk.objects.iterators.KeyPredicateKvIterator;
 import com.usatiuk.objects.iterators.ReversibleKvIterator;
 import com.usatiuk.objects.snapshot.Snapshot;
-import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
-import com.usatiuk.dhfs.utils.RefcountedCloseable;
 import io.quarkus.arc.properties.IfBuildProperty;
 import io.quarkus.logging.Log;
 import io.quarkus.runtime.ShutdownEvent;
@@ -30,9 +32,6 @@ import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.NoSuchElementException;
 import java.util.Optional;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.Consumer;
 import static org.lmdbjava.DbiFlags.MDB_CREATE;
 import static org.lmdbjava.Env.create;
@@ -43,12 +42,9 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
     private static final String DB_NAME = "objects";
     private static final byte[] DB_VER_OBJ_NAME = "__DB_VER_OBJ".getBytes(StandardCharsets.UTF_8);
     private final Path _root;
-    private final AtomicReference<RefcountedCloseable<Txn<ByteBuffer>>> _curReadTxn = new AtomicReference<>();
-    private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();
     private Env<ByteBuffer> _env;
     private Dbi<ByteBuffer> _db;
     private boolean _ready = false;
-    private long _lastTxId = 0;
     public LmdbObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) {
         _root = Path.of(root).resolve("objects");
@@ -65,22 +61,33 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
                 .open(_root.toFile(), EnvFlags.MDB_NOTLS);
         _db = _env.openDbi(DB_NAME, MDB_CREATE);
-        var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
-        bb.put(DB_VER_OBJ_NAME);
-        bb.flip();
-        try (Txn<ByteBuffer> txn = _env.txnRead()) {
-            var value = _db.get(txn, bb);
-            if (value != null) {
-                var ver = value.getLong();
-                Log.infov("Read version: {0}", ver);
-                _lastTxId = ver;
+        try (Txn<ByteBuffer> txn = _env.txnWrite()) {
+            var read = readTxId(txn);
+            if (read.isPresent()) {
+                Log.infov("Read tx id {0}", read.get());
+            } else {
+                var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
+                bb.put(DB_VER_OBJ_NAME);
+                bb.flip();
+                var bbData = ByteBuffer.allocateDirect(8);
+                bbData.putLong(0);
+                bbData.flip();
+                _db.put(txn, bb, bbData);
+                txn.commit();
             }
         }
         _ready = true;
     }
+    private Optional<Long> readTxId(Txn<ByteBuffer> txn) {
+        var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
+        bb.put(DB_VER_OBJ_NAME);
+        bb.flip();
+        var value = _db.get(txn, bb);
+        return Optional.ofNullable(value).map(ByteBuffer::getLong);
+    }
     void shutdown(@Observes @Priority(900) ShutdownEvent event) throws IOException {
         _ready = false;
         _db.close();
@@ -101,76 +108,50 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
         }
     }
-    private RefcountedCloseable<Txn<ByteBuffer>> getCurTxn() {
-        _lock.readLock().lock();
-        try {
-            var got = _curReadTxn.get();
-            var refInc = Optional.ofNullable(got).map(RefcountedCloseable::ref).orElse(null);
-            if (refInc != null) {
-                return got;
-            } else {
-                var newTxn = new RefcountedCloseable<>(_env.txnRead());
-                _curReadTxn.compareAndSet(got, newTxn);
-                return newTxn;
-            }
-        } finally {
-            _lock.readLock().unlock();
-        }
-    }
-    @Override
-    public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
-        return new KeyPredicateKvIterator<>(new LmdbKvIterator(start, key), start, key, (k) -> !Arrays.equals(k.name().getBytes(StandardCharsets.UTF_8), DB_VER_OBJ_NAME));
-    }
     @Override
     public Snapshot<JObjectKey, ByteString> getSnapshot() {
-        _lock.readLock().lock();
-        try {
-            var txn = new RefcountedCloseable<>(_env.txnRead());
-            var commitId = getLastCommitId();
+        var txn = new RefcountedCloseable<>(_env.txnRead());
+        long commitId = readTxId(txn.get()).orElseThrow();
         return new Snapshot<JObjectKey, ByteString>() {
             private final RefcountedCloseable<Txn<ByteBuffer>> _txn = txn;
             private final long _id = commitId;
             private boolean _closed = false;
             @Override
             public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
                 assert !_closed;
                 return new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn.ref(), start, key), start, key, (k) -> !Arrays.equals(k.name().getBytes(StandardCharsets.UTF_8), DB_VER_OBJ_NAME));
             }
             @Nonnull
             @Override
             public Optional<ByteString> readObject(JObjectKey name) {
                 assert !_closed;
                 var got = _db.get(_txn.get(), name.toByteBuffer());
                 var ret = Optional.ofNullable(got).map(ByteString::copyFrom);
                 return ret;
             }
             @Override
             public long id() {
                 assert !_closed;
                 return _id;
             }
             @Override
             public void close() {
                 assert !_closed;
                 _closed = true;
                 _txn.unref();
             }
         };
-        } finally {
-            _lock.readLock().unlock();
-        }
     }
     @Override
-    public void commitTx(TxManifestRaw names, long txId, Consumer<Runnable> commitLocked) {
+    public Runnable prepareTx(TxManifestRaw names, long txId) {
         verifyReady();
-        try (Txn<ByteBuffer> txn = _env.txnWrite()) {
+        var txn = _env.txnWrite();
+        try {
             for (var written : names.written()) {
                 // TODO:
                 var bb = UninitializedByteBuffer.allocateUninitialized(written.getValue().size());
@@ -182,33 +163,26 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
                 _db.delete(txn, key.toByteBuffer());
             }
+            assert txId > readTxId(txn).orElseThrow();
             var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length);
             bb.put(DB_VER_OBJ_NAME);
             bb.flip();
             var bbData = ByteBuffer.allocateDirect(8);
-            commitLocked.accept(() -> {
-                _lock.writeLock().lock();
-                try {
-                    var realTxId = txId;
-                    if (realTxId == -1)
-                        realTxId = _lastTxId + 1;
-                    assert realTxId > _lastTxId;
-                    _lastTxId = realTxId;
-                    bbData.putLong(realTxId);
-                    bbData.flip();
-                    _db.put(txn, bb, bbData);
-                    _curReadTxn.set(null);
-                    txn.commit();
-                } finally {
-                    _lock.writeLock().unlock();
-                }
-            });
+            bbData.putLong(txId);
+            bbData.flip();
+            _db.put(txn, bb, bbData);
+        } catch (Throwable t) {
+            txn.close();
+            throw t;
+        }
+        return () -> {
+            try {
+                txn.commit();
+            } finally {
+                txn.close();
+            }
+        };
     }
     @Override
@@ -229,16 +203,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
         return _root.toFile().getUsableSpace();
     }
-    @Override
-    public long getLastCommitId() {
-        _lock.readLock().lock();
-        try {
-            return _lastTxId;
-        } finally {
-            _lock.readLock().unlock();
-        }
-    }
     private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, ByteString> {
         private static final Cleaner CLEANER = Cleaner.create();
         private final RefcountedCloseable<Txn<ByteBuffer>> _txn;
@@ -264,6 +228,16 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
             });
             verifyReady();
+            if (key instanceof JObjectKeyMin) {
+                _hasNext = _cursor.first();
+                return;
+            } else if (key instanceof JObjectKeyMax) {
+                _hasNext = _cursor.last();
+                return;
+            }
             if (key.toByteBuffer().remaining() == 0) {
                 if (!_cursor.first())
                     return;
@@ -326,10 +300,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
             Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
         }
-        LmdbKvIterator(IteratorStart start, JObjectKey key) {
-            this(getCurTxn(), start, key);
-        }
         @Override
         public void close() {
             if (_closed.getValue()) {
View File: MemoryObjectPersistentStore.java

@@ -2,6 +2,7 @@ package com.usatiuk.objects.stores;
 import com.google.protobuf.ByteString;
 import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.objects.JObjectKeyImpl;
 import com.usatiuk.objects.iterators.CloseableKvIterator;
 import com.usatiuk.objects.iterators.IteratorStart;
 import com.usatiuk.objects.iterators.NavigableMapKvIterator;
@@ -13,7 +14,6 @@ import org.pcollections.TreePMap;
 import javax.annotation.Nonnull;
 import java.util.Optional;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.Consumer;
 @ApplicationScoped
 @IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory")
@@ -30,11 +30,6 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
         }
     }
-    @Override
-    public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
-        return new NavigableMapKvIterator<>(_objects, start, key);
-    }
     @Override
     public Snapshot<JObjectKey, ByteString> getSnapshot() {
         synchronized (this) {
@@ -66,25 +61,19 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
         }
     }
     @Override
-    public void commitTx(TxManifestRaw names, long txId, Consumer<Runnable> commitLocked) {
-        synchronized (this) {
-            for (var written : names.written()) {
-                _objects = _objects.plus(written.getKey(), written.getValue());
-            }
-            for (JObjectKey key : names.deleted()) {
-                _objects = _objects.minus(key);
-            }
-            commitLocked.accept(() -> {
-                _lock.writeLock().lock();
-                try {
-                    assert txId > _lastCommitId;
-                    _lastCommitId = txId;
-                } finally {
-                    _lock.writeLock().unlock();
-                }
-            });
-        }
-    }
+    public Runnable prepareTx(TxManifestRaw names, long txId) {
+        return () -> {
+            synchronized (this) {
+                for (var written : names.written()) {
+                    _objects = _objects.plus(written.getKey(), written.getValue());
+                }
+                for (JObjectKey key : names.deleted()) {
+                    _objects = _objects.minus(key);
+                }
+                assert txId > _lastCommitId;
+                _lastCommitId = txId;
+            }
+        };
+    }
     @Override
@@ -101,14 +90,4 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
     public long getUsableSpace() {
         return 0;
     }
-    @Override
-    public long getLastCommitId() {
-        _lock.readLock().lock();
-        try {
-            return _lastCommitId;
-        } finally {
-            _lock.readLock().unlock();
-        }
-    }
 }

View File: ObjectPersistentStore.java

@@ -16,23 +16,13 @@ public interface ObjectPersistentStore {
     @Nonnull
     Optional<ByteString> readObject(JObjectKey name);
-    // Returns an iterator with a view of all commited objects
-    // Does not have to guarantee consistent view, snapshots are handled by upper layers
-    CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key);
     Snapshot<JObjectKey, ByteString> getSnapshot();
-    /**
-     * @param commitLocked - a function that will be called with a Runnable that will commit the transaction
-     *                     the changes in the store will be visible to new transactions only after the runnable is called
-     */
-    void commitTx(TxManifestRaw names, long txId, Consumer<Runnable> commitLocked);
+    Runnable prepareTx(TxManifestRaw names, long txId);
     long getTotalSpace();
     long getFreeSpace();
     long getUsableSpace();
-    long getLastCommitId();
 }
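
The interface change splits the old callback-based commitTx into two phases: prepareTx performs the writes inside a store transaction and returns a Runnable, and only running that Runnable makes the transaction visible. A caller such as the caching layer can therefore prepare the store commit on one thread while it builds its new cache, then publish both at the same point. A small sketch of the contract (hypothetical simplified types, not the project's API):

// prepareTx does the heavy work up front; the returned Runnable only
// flips the commit switch.
import java.util.HashMap;
import java.util.Map;

interface KvStore {
    Runnable prepareTx(Map<String, String> writes, long txId);
}

class DemoStore implements KvStore {
    private final Map<String, String> data = new HashMap<>();
    private long lastCommitId = 0;

    @Override
    public Runnable prepareTx(Map<String, String> writes, long txId) {
        // Validation and serialization could happen here, off the commit path.
        return () -> {
            synchronized (this) {
                data.putAll(writes);        // apply
                assert txId > lastCommitId; // ids must be monotonic
                lastCommitId = txId;        // becomes visible only now
            }
        };
    }
}

class DemoUse {
    static void commit(KvStore store) {
        var pending = store.prepareTx(Map.of("a", "1"), 1);
        // ... update caches / in-memory state here ...
        pending.run(); // both become visible around the same point
    }
}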

View File: RocksDbObjectPersistentStore.java

@@ -0,0 +1,280 @@
package com.usatiuk.objects.stores;
import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyMax;
import com.usatiuk.objects.JObjectKeyMin;
import com.usatiuk.objects.iterators.CloseableKvIterator;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.iterators.ReversibleKvIterator;
import com.usatiuk.objects.snapshot.Snapshot;
import io.quarkus.arc.properties.IfBuildProperty;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.rocksdb.*;
import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.NoSuchElementException;
import java.util.Optional;
@ApplicationScoped
@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "rocks")
public class RocksDbObjectPersistentStore implements ObjectPersistentStore {
private static final String DB_NAME = "objects";
private static final byte[] DB_VER_OBJ_NAME = "__DB_VER_OBJ".getBytes(StandardCharsets.UTF_8);
private final Path _root;
private Options _options;
private TransactionDBOptions _transactionDBOptions;
private TransactionDB _db;
private boolean _ready = false;
public RocksDbObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) {
_root = Path.of(root).resolve("objects");
}
void init(@Observes @Priority(100) StartupEvent event) throws RocksDBException {
if (!_root.toFile().exists()) {
Log.info("Initializing with root " + _root);
_root.toFile().mkdirs();
}
RocksDB.loadLibrary();
_options = new Options().setCreateIfMissing(true);
_transactionDBOptions = new TransactionDBOptions();
_db = TransactionDB.open(_options, _transactionDBOptions, _root.toString());
try (var txn = _db.beginTransaction(new WriteOptions())) {
var read = readTxId(txn);
if (read.isPresent()) {
Log.infov("Read tx id {0}", read.get());
} else {
txn.put(DB_VER_OBJ_NAME, ByteBuffer.allocate(8).putLong(0).array());
txn.commit();
}
}
_ready = true;
}
private Optional<Long> readTxId(Transaction txn) throws RocksDBException {
var value = txn.get(new ReadOptions(), DB_VER_OBJ_NAME);
return Optional.ofNullable(value).map(ByteBuffer::wrap).map(ByteBuffer::getLong);
}
void shutdown(@Observes @Priority(900) ShutdownEvent event) {
_ready = false;
_db.close();
}
private void verifyReady() {
if (!_ready) throw new IllegalStateException("Wrong service order!");
}
@Nonnull
@Override
public Optional<ByteString> readObject(JObjectKey name) {
verifyReady();
byte[] got = null;
try {
got = _db.get(new ReadOptions(), name.bytes());
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
return Optional.ofNullable(got).map(ByteString::copyFrom);
}
@Override
public Snapshot<JObjectKey, ByteString> getSnapshot() {
var txn = _db.beginTransaction(new WriteOptions());
txn.setSnapshot();
var rocksDbSnapshot = txn.getSnapshot();
long commitId = 0;
try {
commitId = readTxId(txn).orElseThrow();
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
long finalCommitId = commitId;
return new Snapshot<JObjectKey, ByteString>() {
private final Transaction _txn = txn;
private final long _id = finalCommitId;
private final org.rocksdb.Snapshot _rocksDbSnapshot = rocksDbSnapshot;
private boolean _closed = false;
@Override
public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
assert !_closed;
return new RocksDbKvIterator(_txn, start, key, _rocksDbSnapshot);
}
@Nonnull
@Override
public Optional<ByteString> readObject(JObjectKey name) {
assert !_closed;
try (var readOptions = new ReadOptions().setSnapshot(_rocksDbSnapshot)) {
var got = _txn.get(readOptions, name.bytes());
return Optional.ofNullable(got).map(ByteString::copyFrom);
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
}
@Override
public long id() {
assert !_closed;
return _id;
}
@Override
public void close() {
assert !_closed;
_closed = true;
_txn.close();
}
};
}
@Override
public Runnable prepareTx(TxManifestRaw names, long txId) {
verifyReady();
var txn = _db.beginTransaction(new WriteOptions());
try {
for (var written : names.written()) {
txn.put(written.getKey().bytes(), written.getValue().toByteArray());
}
for (JObjectKey key : names.deleted()) {
txn.delete(key.bytes());
}
assert txId > readTxId(txn).orElseThrow();
txn.put(DB_VER_OBJ_NAME, ByteBuffer.allocate(8).putLong(txId).array());
} catch (Throwable t) {
txn.close();
throw new RuntimeException(t);
}
return () -> {
try {
txn.commit();
} catch (RocksDBException e) {
throw new RuntimeException(e);
} finally {
txn.close();
}
};
}
@Override
public long getTotalSpace() {
verifyReady();
return _root.toFile().getTotalSpace();
}
@Override
public long getFreeSpace() {
verifyReady();
return _root.toFile().getFreeSpace();
}
@Override
public long getUsableSpace() {
verifyReady();
return _root.toFile().getUsableSpace();
}
private class RocksDbKvIterator extends ReversibleKvIterator<JObjectKey, ByteString> {
private final RocksIterator _iterator;
private final org.rocksdb.Snapshot _rocksDbSnapshot;
private final ReadOptions _readOptions;
private boolean _hasNext;
RocksDbKvIterator(Transaction txn, IteratorStart start, JObjectKey key, org.rocksdb.Snapshot rocksDbSnapshot) {
_rocksDbSnapshot = rocksDbSnapshot;
_readOptions = new ReadOptions().setSnapshot(_rocksDbSnapshot);
_iterator = txn.getIterator(_readOptions);
verifyReady();
if (key instanceof JObjectKeyMin) {
_iterator.seekToFirst();
} else if (key instanceof JObjectKeyMax) {
_iterator.seekToLast();
} else {
_iterator.seek(key.bytes());
}
_hasNext = _iterator.isValid();
}
@Override
public void close() {
_iterator.close();
}
@Override
protected void reverse() {
if (_hasNext) {
if (_goingForward) {
_iterator.prev();
} else {
_iterator.next();
}
} else {
if (_goingForward) {
_iterator.seekToLast();
} else {
_iterator.seekToFirst();
}
}
_goingForward = !_goingForward;
_hasNext = _iterator.isValid();
}
@Override
protected JObjectKey peekImpl() {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
return JObjectKey.fromByteBuffer(ByteBuffer.wrap(_iterator.key()));
}
@Override
protected void skipImpl() {
if (_goingForward) {
_iterator.next();
} else {
_iterator.prev();
}
_hasNext = _iterator.isValid();
}
@Override
protected boolean hasImpl() {
return _hasNext;
}
@Override
protected Pair<JObjectKey, ByteString> nextImpl() {
if (!_hasNext) {
throw new NoSuchElementException("No more elements");
}
var key = JObjectKey.fromByteBuffer(ByteBuffer.wrap(_iterator.key()));
var value = ByteString.copyFrom(_iterator.value());
if (_goingForward) {
_iterator.next();
} else {
_iterator.prev();
}
_hasNext = _iterator.isValid();
return Pair.of(key, value);
}
}
}
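
This backend is selected by the dhfs.objects.persistence=rocks build property (per @IfBuildProperty) and builds on RocksDB's TransactionDB: a transaction pins a snapshot, and reads or iterators opened with that snapshot keep a frozen view while later commits proceed, mirroring what the LMDB store gets from its read transactions. A standalone sketch of just those RocksDB primitives, assuming the org.rocksdb:rocksdbjni dependency from the pom change above (the database path is a placeholder):

// Open a TransactionDB, commit a write, then read through a pinned snapshot.
import org.rocksdb.*;

public class RocksSnapshotDemo {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (var options = new Options().setCreateIfMissing(true);
             var txnDbOptions = new TransactionDBOptions();
             var db = TransactionDB.open(options, txnDbOptions, "/tmp/rocks-demo")) {

            try (var write = db.beginTransaction(new WriteOptions())) {
                write.put("k".getBytes(), "v1".getBytes());
                write.commit();
            }

            try (var reader = db.beginTransaction(new WriteOptions())) {
                reader.setSnapshot();                 // pin a consistent view
                var snapshot = reader.getSnapshot();
                try (var readOptions = new ReadOptions().setSnapshot(snapshot);
                     var it = reader.getIterator(readOptions)) {
                    // Iteration sees the pinned view even if commits happen meanwhile.
                    for (it.seekToFirst(); it.isValid(); it.next()) {
                        System.out.println(new String(it.key()) + " = " + new String(it.value()));
                    }
                }
            }
        }
    }
}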

View File: SerializingObjectPersistentStore.java

@@ -3,6 +3,7 @@ package com.usatiuk.objects.stores;
 import com.google.protobuf.ByteString;
 import com.usatiuk.objects.JDataVersionedWrapper;
 import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.objects.JObjectKeyImpl;
 import com.usatiuk.objects.ObjectSerializer;
 import com.usatiuk.objects.iterators.CloseableKvIterator;
 import com.usatiuk.objects.iterators.IteratorStart;
@@ -14,7 +15,6 @@ import org.apache.commons.lang3.tuple.Pair;
 import javax.annotation.Nonnull;
 import java.util.Optional;
-import java.util.function.Consumer;
 @ApplicationScoped
 public class SerializingObjectPersistentStore {
@@ -24,18 +24,6 @@ public class SerializingObjectPersistentStore {
     @Inject
     ObjectPersistentStore delegateStore;
-    @Nonnull
-    Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
-        return delegateStore.readObject(name).map(serializer::deserialize);
-    }
-    public TxManifestRaw prepareManifest(TxManifestObj<? extends JDataVersionedWrapper> names) {
-        return new TxManifestRaw(
-                names.written().stream()
-                        .map(e -> Pair.of(e.getKey(), serializer.serialize(e.getValue())))
-                        .toList()
-                , names.deleted());
-    }
     public Snapshot<JObjectKey, JDataVersionedWrapper> getSnapshot() {
         return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
@@ -65,16 +53,15 @@ public class SerializingObjectPersistentStore {
         }
     }
-    // void commitTx(TxManifestObj<? extends JDataVersionedWrapper> names, Consumer<Runnable> commitLocked) {
-    //     delegateStore.commitTx(prepareManifest(names), commitLocked);
-    // }
-    void commitTx(TxManifestRaw names, long txId, Consumer<Runnable> commitLocked) {
-        delegateStore.commitTx(names, txId, commitLocked);
+    private TxManifestRaw prepareManifest(TxManifestObj<? extends JDataVersionedWrapper> objs) {
+        return new TxManifestRaw(
+                objs.written().stream()
+                        .map(e -> Pair.of(e.getKey(), serializer.serialize(e.getValue())))
+                        .toList()
+                , objs.deleted());
     }
-    long getLastCommitId() {
-        return delegateStore.getLastCommitId();
+    Runnable prepareTx(TxManifestObj<? extends JDataVersionedWrapper> objects, long txId) {
+        return delegateStore.prepareTx(prepareManifest(objects), txId);
     }
 }
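
After this change the serializing layer is a pure decorator over the byte-level store: prepareManifest maps every written (key, object) pair through the serializer eagerly during prepare, so the Runnable handed back by the underlying store's prepareTx stays cheap at commit time. The same shape in miniature (hypothetical simplified types):

// Decorator that serializes eagerly, then delegates the prepared commit.
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

interface RawStore {
    Runnable prepareTx(Map<String, byte[]> rawWrites, long txId);
}

class SerializingStore<T> {
    private final RawStore delegate;
    private final Function<T, byte[]> serializer;

    SerializingStore(RawStore delegate, Function<T, byte[]> serializer) {
        this.delegate = delegate;
        this.serializer = serializer;
    }

    Runnable prepareTx(Map<String, T> writes, long txId) {
        // Serialize during prepare so the returned commit step stays cheap.
        Map<String, byte[]> raw = new HashMap<>();
        writes.forEach((k, v) -> raw.put(k, serializer.apply(v)));
        return delegate.prepareTx(raw, txId);
    }
}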

View File: WritebackObjectPersistentStore.java

@@ -3,7 +3,7 @@ package com.usatiuk.objects.stores;
 import com.usatiuk.objects.JDataVersionedWrapper;
 import com.usatiuk.objects.JDataVersionedWrapperImpl;
 import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.objects.JObjectKeyImpl;
 import com.usatiuk.objects.iterators.*;
 import com.usatiuk.objects.snapshot.Snapshot;
 import com.usatiuk.objects.transaction.TxCommitException;
@@ -23,27 +23,29 @@ import org.pcollections.TreePMap;
import javax.annotation.Nonnull;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
@ApplicationScoped
public class WritebackObjectPersistentStore {
private final LinkedList<TxBundle> _pendingBundles = new LinkedList<>();
private final AtomicReference<PSortedMap<JObjectKey, PendingWriteEntry>> _pendingWrites = new AtomicReference<>(TreePMap.empty());
private final ReentrantReadWriteLock _pendingWritesVersionLock = new ReentrantReadWriteLock();
private final LinkedHashMap<Long, TxBundle> _notFlushedBundles = new LinkedHashMap<>();
private record PendingWriteData(TreePMap<JObjectKey, PendingWriteEntry> pendingWrites,
long lastFlushedId,
long lastCommittedId) {
}
private final AtomicReference<PendingWriteData> _pendingWrites = new AtomicReference<>(null);
private final Object _flushWaitSynchronizer = new Object();
private final AtomicLong _lastWrittenTx = new AtomicLong(-1);
private final AtomicLong _counter = new AtomicLong();
private final AtomicLong _lastCommittedTx = new AtomicLong(-1);
private final AtomicLong _lastWrittenId = new AtomicLong(-1);
private final AtomicLong _lastCommittedId = new AtomicLong();
private final AtomicLong _waitedTotal = new AtomicLong(0);
@Inject
CachingObjectPersistentStore cachedStore;
@@ -54,7 +56,7 @@ public class WritebackObjectPersistentStore {
private ExecutorService _statusExecutor;
private volatile boolean _ready = false;
void init(@Observes @Priority(110) StartupEvent event) {
void init(@Observes @Priority(120) StartupEvent event) {
{
BasicThreadFactory factory = new BasicThreadFactory.Builder()
.namingPattern("tx-writeback-%d")
@@ -75,8 +77,12 @@ public class WritebackObjectPersistentStore {
} catch (InterruptedException ignored) {
}
});
_counter.set(cachedStore.getLastTxId());
_lastCommittedTx.set(cachedStore.getLastTxId());
long lastTxId;
try (var s = cachedStore.getSnapshot()) {
lastTxId = s.id();
}
_lastCommittedId.set(lastTxId);
_pendingWrites.set(new PendingWriteData(TreePMap.empty(), lastTxId, lastTxId));
_ready = true;
}
@@ -143,23 +149,26 @@ public class WritebackObjectPersistentStore {
Log.trace("Bundle " + bundle.getId() + " committed");
                // Remove from pending writes, after the real commit.
                // As we are the only writers to _pendingWrites, there is no need to
                // synchronize with iterator creation: if an iterator gets the older
                // version, it will still contain all the new changes.
synchronized (_pendingBundles) {
while (true) {
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var e : bundle._entries.values()) {
var cur = curPw.get(e.key());
var cur = curPwMap.get(e.key());
if (cur.bundleId() <= bundle.getId())
curPw = curPw.minus(e.key());
curPwMap = curPwMap.minus(e.key());
}
_pendingWrites.set(curPw);
// No need to increment version
var newCurPw = new PendingWriteData(
curPwMap,
bundle.getId(),
curPw.lastCommittedId()
);
if (_pendingWrites.compareAndSet(curPw, newCurPw))
break;
}
List<List<Runnable>> callbacks = new ArrayList<>();
synchronized (_notFlushedBundles) {
_lastWrittenTx.set(bundle.getId());
_lastWrittenId.set(bundle.getId());
while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) {
callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted());
}
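
Trimming _pendingWrites after a flush no longer needs a lock around the map itself: the whole view lives in one immutable record behind an AtomicReference, and updates retry compareAndSet until they win. A condensed sketch of the idiom with pcollections (record and field names are illustrative):

import java.util.concurrent.atomic.AtomicReference;
import org.pcollections.PSortedMap;
import org.pcollections.TreePMap;

class PendingStateSketch {
    record State(PSortedMap<String, Long> pending, long lastFlushedId) {}

    private final AtomicReference<State> state =
            new AtomicReference<>(new State(TreePMap.empty(), -1));

    // Drop an entry only if it belongs to the just-flushed bundle, then
    // retry until our compareAndSet wins against concurrent committers.
    void markFlushed(String key, long bundleId) {
        while (true) {
            State cur = state.get();
            PSortedMap<String, Long> map = cur.pending();
            Long entry = map.get(key);
            if (entry != null && entry <= bundleId)
                map = map.minus(key);
            if (state.compareAndSet(cur, new State(map, bundleId)))
                return; // readers see either the old or the new snapshot, never a mix
        }
    }
}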
@@ -182,8 +191,7 @@ public class WritebackObjectPersistentStore {
Log.info("Writeback thread exiting");
}
public TxBundle createBundle() {
public long commitBundle(Collection<TxRecord.TxObjectRecord<?>> writes) {
verifyReady();
boolean wait = false;
while (true) {
@@ -204,6 +212,7 @@ public class WritebackObjectPersistentStore {
wait = false;
}
}
synchronized (_pendingBundles) {
synchronized (_flushWaitSynchronizer) {
if (currentSize > sizeLimit) {
@@ -227,79 +236,76 @@ public class WritebackObjectPersistentStore {
continue;
}
}
TxBundle bundle;
synchronized (_notFlushedBundles) {
var bundle = new TxBundle(_counter.incrementAndGet());
bundle = new TxBundle(_lastCommittedId.incrementAndGet());
_pendingBundles.addLast(bundle);
_notFlushedBundles.put(bundle.getId(), bundle);
return bundle;
}
}
}
}
public void commitBundle(TxBundle bundle) {
verifyReady();
_pendingWritesVersionLock.writeLock().lock();
try {
synchronized (_pendingBundles) {
var curPw = _pendingWrites.get();
for (var e : ((TxBundle) bundle)._entries.values()) {
switch (e) {
case TxBundle.CommittedEntry c -> {
curPw = curPw.plus(c.key(), new PendingWrite(c.data, bundle.getId()));
for (var action : writes) {
switch (action) {
case TxRecord.TxObjectRecordWrite<?> write -> {
Log.trace("Flushing object " + write.key());
bundle.commit(new JDataVersionedWrapperImpl(write.data(), bundle.getId()));
}
case TxBundle.DeletedEntry d -> {
curPw = curPw.plus(d.key(), new PendingDelete(d.key, bundle.getId()));
case TxRecord.TxObjectRecordDeleted deleted -> {
Log.trace("Deleting object " + deleted.key());
bundle.delete(deleted.key());
}
default -> {
throw new TxCommitException("Unexpected value: " + action.key());
}
default -> throw new IllegalStateException("Unexpected value: " + e);
}
}
// Now, make the changes visible to new iterators
_pendingWrites.set(curPw);
((TxBundle) bundle).setReady();
if (_pendingBundles.peek() == bundle)
_pendingBundles.notify();
synchronized (_flushWaitSynchronizer) {
currentSize += ((TxBundle) bundle).calculateTotalSize();
while (true) {
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var e : ((TxBundle) bundle)._entries.values()) {
switch (e) {
case TxBundle.CommittedEntry c -> {
curPwMap = curPwMap.plus(c.key(), new PendingWrite(c.data, bundle.getId()));
}
case TxBundle.DeletedEntry d -> {
curPwMap = curPwMap.plus(d.key(), new PendingDelete(d.key, bundle.getId()));
}
default -> throw new IllegalStateException("Unexpected value: " + e);
}
}
// Now, make the changes visible to new iterators
var newCurPw = new PendingWriteData(
curPwMap,
curPw.lastFlushedId(),
bundle.getId()
);
if (!_pendingWrites.compareAndSet(curPw, newCurPw))
continue;
((TxBundle) bundle).setReady();
if (_pendingBundles.peek() == bundle)
_pendingBundles.notify();
synchronized (_flushWaitSynchronizer) {
currentSize += ((TxBundle) bundle).calculateTotalSize();
}
return bundle.getId();
}
}
assert bundle.getId() > _lastCommittedTx.get();
_lastCommittedTx.set(bundle.getId());
} finally {
_pendingWritesVersionLock.writeLock().unlock();
}
}
public void dropBundle(TxBundle bundle) {
verifyReady();
synchronized (_pendingBundles) {
Log.warn("Dropped bundle: " + bundle);
_pendingBundles.remove((TxBundle) bundle);
synchronized (_flushWaitSynchronizer) {
currentSize -= ((TxBundle) bundle).calculateTotalSize();
}
}
}
public void fence(long bundleId) {
var latch = new CountDownLatch(1);
asyncFence(bundleId, latch::countDown);
try {
latch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
public void asyncFence(long bundleId, Runnable fn) {
verifyReady();
        if (bundleId < 0) throw new IllegalArgumentException("bundleId should be >= 0!");
if (_lastWrittenTx.get() >= bundleId) {
if (_lastWrittenId.get() >= bundleId) {
fn.run();
return;
}
synchronized (_notFlushedBundles) {
if (_lastWrittenTx.get() >= bundleId) {
if (_lastWrittenId.get() >= bundleId) {
fn.run();
return;
}
@@ -307,90 +313,41 @@ public class WritebackObjectPersistentStore {
}
}
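
asyncFence above uses a classic fast-path/re-check shape: test the flushed watermark without a lock, then re-test under synchronization before parking the callback, so a flush that races in between is never missed. A reduced sketch of that idiom (it collapses the per-bundle bookkeeping into one list; names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

class FlushFence {
    private final AtomicLong lastFlushed = new AtomicLong(-1);
    private final List<Runnable> waiters = new ArrayList<>();

    void onFlushed(long id, Runnable fn) {
        if (lastFlushed.get() >= id) { fn.run(); return; }     // fast path, no lock
        synchronized (waiters) {
            if (lastFlushed.get() >= id) { fn.run(); return; } // re-check under lock
            waiters.add(fn);                                   // safe: flusher also locks
        }
    }

    void advance(long id) {
        List<Runnable> toRun;
        synchronized (waiters) {
            lastFlushed.set(id);
            toRun = new ArrayList<>(waiters);
            waiters.clear();
        }
        toRun.forEach(Runnable::run); // run callbacks outside the lock
    }
}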
public Optional<PendingWriteEntry> getPendingWrite(JObjectKey key) {
synchronized (_pendingBundles) {
return Optional.ofNullable(_pendingWrites.get().get(key));
}
}
@Nonnull
public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
var pending = getPendingWrite(name).orElse(null);
return switch (pending) {
case PendingWrite write -> Optional.of(write.data());
case PendingDelete ignored -> Optional.empty();
case null -> cachedStore.readObject(name);
default -> throw new IllegalStateException("Unexpected value: " + pending);
};
}
@Nonnull
public VerboseReadResult readObjectVerbose(JObjectKey key) {
var pending = getPendingWrite(key).orElse(null);
if (pending != null) {
return new VerboseReadResultPending(pending);
}
return new VerboseReadResultPersisted(cachedStore.readObject(key));
}
/**
     * @param commitLocked - a function that will be called with the new transaction id
     *                       and a Runnable that commits the transaction; the changes in
     *                       the store become visible to new transactions only after the
     *                       runnable is called
*/
public Consumer<Runnable> commitTx(Collection<TxRecord.TxObjectRecord<?>> writes, BiConsumer<Long, Runnable> commitLocked) {
var bundle = createBundle();
long bundleId = bundle.getId();
try {
for (var action : writes) {
switch (action) {
case TxRecord.TxObjectRecordWrite<?> write -> {
Log.trace("Flushing object " + write.key());
bundle.commit(new JDataVersionedWrapperImpl(write.data(), bundleId));
}
case TxRecord.TxObjectRecordDeleted deleted -> {
Log.trace("Deleting object " + deleted.key());
bundle.delete(deleted.key());
}
default -> {
throw new TxCommitException("Unexpected value: " + action.key());
}
}
}
} catch (Throwable t) {
dropBundle(bundle);
throw new TxCommitException(t.getMessage(), t);
}
Log.tracef("Committing transaction %d to storage", bundleId);
commitLocked.accept(bundleId, () -> {
commitBundle(bundle);
});
public Consumer<Runnable> commitTx(Collection<TxRecord.TxObjectRecord<?>> writes) {
long bundleId = commitBundle(writes);
return r -> asyncFence(bundleId, r);
}
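
commitTx now folds bundle creation, filling, and publication into one call, and what it returns is just a durability hook. Rough usage (writebackStore and the write set are assumed to be in scope):

// Commit the writes; the returned Consumer registers a callback that
// fires once the bundle is durably flushed to the backing store.
Consumer<Runnable> onDurable = writebackStore.commitTx(writes);
onDurable.accept(() -> Log.trace("bundle flushed to disk"));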
public Snapshot<JObjectKey, JDataVersionedWrapper> getSnapshot() {
PSortedMap<JObjectKey, PendingWriteEntry> pendingWrites;
Snapshot<JObjectKey, JDataVersionedWrapper> cache = null;
long lastTxId;
PendingWriteData pw = null;
try {
_pendingWritesVersionLock.readLock().lock();
try {
pendingWrites = _pendingWrites.get();
while (true) {
pw = _pendingWrites.get();
cache = cachedStore.getSnapshot();
lastTxId = getLastTxId();
} finally {
_pendingWritesVersionLock.readLock().unlock();
if (cache.id() >= pw.lastCommittedId())
return cache;
// TODO: Can this really happen?
if (cache.id() < pw.lastFlushedId()) {
assert false;
cache.close();
cache = null;
continue;
}
break;
}
PendingWriteData finalPw = pw;
Snapshot<JObjectKey, JDataVersionedWrapper> finalCache = cache;
return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
private final PSortedMap<JObjectKey, PendingWriteEntry> _pendingWrites = pendingWrites;
private final PSortedMap<JObjectKey, PendingWriteEntry> _pendingWrites = finalPw.pendingWrites();
private final Snapshot<JObjectKey, JDataVersionedWrapper> _cache = finalCache;
private final long txId = lastTxId;
private final long txId = finalPw.lastCommittedId();
@Override
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
@@ -423,8 +380,8 @@ public class WritebackObjectPersistentStore {
@Override
public long id() {
assert lastTxId >= _cache.id();
return lastTxId;
assert txId >= _cache.id();
return txId;
}
@Override
@@ -439,15 +396,6 @@ public class WritebackObjectPersistentStore {
}
}
public long getLastTxId() {
_pendingWritesVersionLock.readLock().lock();
try {
return _lastCommittedTx.get();
} finally {
_pendingWritesVersionLock.readLock().unlock();
}
}
public interface VerboseReadResult {
}
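
getSnapshot now loops until the cache snapshot is at least as new as the pending-write view it was paired with, instead of holding a version lock across both reads. The invariant in sketch form (types and names are illustrative):

import java.util.function.Supplier;

interface Snap extends AutoCloseable {
    long id();
    @Override void close();
}

record PendingView(long lastFlushedId, long lastCommittedId) {}

final class SnapshotPairing {
    // Keep retrying until the store snapshot fits the pending-write view.
    static Snap acquire(Supplier<PendingView> views, Supplier<Snap> snaps) {
        while (true) {
            PendingView pw = views.get();
            Snap cache = snaps.get();
            if (cache.id() >= pw.lastCommittedId())
                return cache;   // snapshot already covers every pending write
            if (cache.id() >= pw.lastFlushedId())
                return cache;   // the pending view fills the remaining gap
            cache.close();      // snapshot older than the flushed watermark: retry
        }
    }
}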

View File

@@ -3,6 +3,7 @@ package com.usatiuk.objects.transaction;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.snapshot.SnapshotManager;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import io.quarkus.logging.Log;
@@ -58,18 +59,10 @@ public class JObjectManager {
verifyReady();
var writes = new LinkedHashMap<JObjectKey, TxRecord.TxObjectRecord<?>>();
var dependenciesLocked = new LinkedHashMap<JObjectKey, Optional<JDataVersionedWrapper>>();
Snapshot<JObjectKey, JDataVersionedWrapper> commitSnapshot = null;
Map<JObjectKey, TransactionObject<?>> readSet;
var toUnlock = new ArrayList<AutoCloseableNoThrow>();
Consumer<JObjectKey> addDependency =
key -> {
dependenciesLocked.computeIfAbsent(key, k -> {
var lock = lockManager.lockObject(k);
toUnlock.add(lock);
return snapshotManager.readObjectDirect(k);
});
};
try {
try {
long pendingCount = 0;
@@ -161,7 +154,12 @@ public class JObjectManager {
if (!writes.isEmpty()) {
Stream.concat(readSet.keySet().stream(), writes.keySet().stream())
.sorted(Comparator.comparing(JObjectKey::toString))
.forEach(addDependency);
.forEach(k -> {
var lock = lockManager.lockObject(k);
toUnlock.add(lock);
});
commitSnapshot = snapshotManager.createSnapshot();
}
for (var read : readSet.entrySet()) {
@@ -189,39 +187,47 @@ public class JObjectManager {
Log.trace("Committing transaction start");
var snapshotId = tx.snapshot().id();
for (var read : readSet.entrySet()) {
var dep = dependenciesLocked.get(read.getKey());
if (snapshotId != commitSnapshot.id()) {
for (var read : readSet.entrySet()) {
dependenciesLocked.put(read.getKey(), commitSnapshot.readObject(read.getKey()));
var dep = dependenciesLocked.get(read.getKey());
if (dep.isEmpty() != read.getValue().data().isEmpty()) {
Log.trace("Checking read dependency " + read.getKey() + " - not found");
throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
}
if (dep.isEmpty() != read.getValue().data().isEmpty()) {
Log.trace("Checking read dependency " + read.getKey() + " - not found");
throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
}
if (dep.isEmpty()) {
// TODO: Every write gets a dependency due to hooks
continue;
if (dep.isEmpty()) {
// TODO: Every write gets a dependency due to hooks
continue;
// assert false;
// throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
}
}
if (dep.get().version() > snapshotId) {
Log.trace("Checking dependency " + read.getKey() + " - newer than");
throw new TxCommitException("Serialization hazard: " + dep.get().data().key() + " " + dep.get().version() + " vs " + snapshotId);
}
if (dep.get().version() > snapshotId) {
Log.trace("Checking dependency " + read.getKey() + " - newer than");
throw new TxCommitException("Serialization hazard: " + dep.get().data().key() + " " + dep.get().version() + " vs " + snapshotId);
}
Log.trace("Checking dependency " + read.getKey() + " - ok with read");
Log.trace("Checking dependency " + read.getKey() + " - ok with read");
}
} else {
Log.tracev("Skipped dependency checks: no changes");
}
boolean same = snapshotId == commitSnapshot.id();
var addFlushCallback = snapshotManager.commitTx(
writes.values().stream()
.filter(r -> {
if (r instanceof TxRecord.TxObjectRecordWrite<?>(JData data)) {
var dep = dependenciesLocked.get(data.key());
if (dep.isPresent() && dep.get().version() > snapshotId) {
Log.trace("Skipping write " + data.key() + " - dependency " + dep.get().version() + " vs " + snapshotId);
return false;
if (!same)
if (r instanceof TxRecord.TxObjectRecordWrite<?>(JData data)) {
var dep = dependenciesLocked.get(data.key());
if (dep.isPresent() && dep.get().version() > snapshotId) {
Log.trace("Skipping write " + data.key() + " - dependency " + dep.get().version() + " vs " + snapshotId);
return false;
}
}
}
return true;
}).toList());
@@ -244,6 +250,8 @@ public class JObjectManager {
for (var unlock : toUnlock) {
unlock.close();
}
if (commitSnapshot != null)
commitSnapshot.close();
tx.close();
}
}
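
The commit path re-reads each dependency from a commit-time snapshot only when the transaction's snapshot is stale, and rejects the commit if any dependency changed after it. A reduced sketch of that optimistic check (types and names are illustrative):

import java.util.Map;
import java.util.Optional;

record Versioned(long version) {}

final class OptimisticCheck {
    // Reject the commit if any object read at snapshotId has since changed:
    // either its presence flipped, or a newer version was committed.
    static void verify(long snapshotId,
                       Map<String, Optional<Versioned>> readAtSnapshot,
                       Map<String, Optional<Versioned>> readAtCommit) {
        for (var e : readAtSnapshot.entrySet()) {
            Optional<Versioned> then = e.getValue();
            Optional<Versioned> now = readAtCommit.get(e.getKey());
            if (then.isEmpty() != now.isEmpty())
                throw new IllegalStateException("Serialization hazard: " + e.getKey());
            if (now.isPresent() && now.get().version() > snapshotId)
                throw new IllegalStateException("Serialization hazard: " + e.getKey());
        }
    }
}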

View File

@@ -1,4 +1,4 @@
dhfs.objects.persistence=lmdb
dhfs.objects.persistence=rocks
dhfs.objects.writeback.limit=134217728
dhfs.objects.lru.limit=134217728
dhfs.objects.lru.print-stats=true
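
The default persistence backend flips from lmdb to rocks here; the value is an ordinary MicroProfile Config property, so any component can branch on it. A hedged sketch of reading it (the property name is from above; the selector class is illustrative):

import jakarta.enterprise.context.ApplicationScoped;
import org.eclipse.microprofile.config.inject.ConfigProperty;

@ApplicationScoped
class PersistenceSelector {
    @ConfigProperty(name = "dhfs.objects.persistence")
    String backend; // "lmdb" or "rocks"

    boolean useRocks() {
        return "rocks".equals(backend);
    }
}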

View File

@@ -0,0 +1,56 @@
package com.usatiuk.objects;
import com.usatiuk.objects.data.Parent;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
class ObjectsIterateAllTestProfiles {
public static class ObjectsIterateAllTestProfile extends TempDataProfile {
}
}
@QuarkusTest
@TestProfile(ObjectsIterateAllTestProfiles.ObjectsIterateAllTestProfile.class)
public class ObjectsIterateAllTest {
@Inject
TransactionManager txm;
@Inject
Transaction curTx;
@Test
void testBegin() {
var newParent = new Parent(JObjectKey.of("IterateAllBegin1"), "John1");
var newParent2 = new Parent(JObjectKey.of("IterateAllBegin2"), "John2");
var newParent3 = new Parent(JObjectKey.of("IterateAllBegin3"), "John3");
txm.run(() -> {
curTx.put(newParent);
curTx.put(newParent2);
curTx.put(newParent3);
});
txm.run(() -> {
try (var it = curTx.getIterator(JObjectKey.first())) {
Just.checkIterator(it, Stream.<JData>of(newParent, newParent2, newParent3).map(p -> Pair.of(p.key(), p)).toList());
}
});
txm.run(() -> {
try (var it = curTx.getIterator(JObjectKey.last()).reversed()) {
Just.checkIterator(it, Stream.<JData>of(newParent3, newParent2, newParent).map(p -> Pair.of(p.key(), p)).toList());
}
});
}
}
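
JObjectKey.first() and JObjectKey.last() give the iterator well-defined start sentinels without special-casing empty keys. One way to get that shape is a sealed key type whose MIN/MAX variants order below and above every real key; a sketch under that assumption, not the project's exact definition:

sealed interface Key extends Comparable<Key> permits Key.Real, Key.Min, Key.Max {
    static Key of(String name) { return new Real(name); }
    static Key first() { return new Min(); }
    static Key last() { return new Max(); }

    record Real(String name) implements Key {
        @Override public int compareTo(Key o) {
            return switch (o) {
                case Min m -> 1;                  // every real key is after MIN
                case Max m -> -1;                 // and before MAX
                case Real r -> name.compareTo(r.name());
            };
        }
    }
    record Min() implements Key {
        @Override public int compareTo(Key o) { return (o instanceof Min) ? 0 : -1; }
    }
    record Max() implements Key {
        @Override public int compareTo(Key o) { return (o instanceof Max) ? 0 : 1; }
    }
}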

View File

@@ -64,7 +64,7 @@ public abstract class ObjectsTestImpl {
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate")).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("ParentCreate")).orElse(null);
Assertions.assertEquals("John", parent.name());
});
}
@@ -84,11 +84,11 @@ public abstract class ObjectsTestImpl {
}));
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("ParentOnCommitHook")).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("ParentOnCommitHook")).orElse(null);
Assertions.assertEquals("John", parent.name());
var parent2 = curTx.get(Parent.class, new JObjectKey("ParentOnCommitHook2")).orElse(null);
var parent2 = curTx.get(Parent.class, new JObjectKeyImpl("ParentOnCommitHook2")).orElse(null);
Assertions.assertEquals("John2", parent2.name());
var parent3 = curTx.get(Parent.class, new JObjectKey("ParentOnCommitHook3")).orElse(null);
var parent3 = curTx.get(Parent.class, new JObjectKeyImpl("ParentOnCommitHook3")).orElse(null);
Assertions.assertEquals("John3", parent3.name());
});
}
@@ -103,7 +103,7 @@ public abstract class ObjectsTestImpl {
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("ParentCreateGet")).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("ParentCreateGet")).orElse(null);
Assertions.assertEquals("John", parent.name());
});
}
@@ -121,11 +121,11 @@ public abstract class ObjectsTestImpl {
});
txm.run(() -> {
curTx.delete(new JObjectKey("ParentCreateDeleteObject"));
curTx.delete(new JObjectKeyImpl("ParentCreateDeleteObject"));
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("ParentCreateDeleteObject")).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("ParentCreateDeleteObject")).orElse(null);
Assertions.assertNull(parent);
});
}
@@ -141,7 +141,7 @@ public abstract class ObjectsTestImpl {
curTx.put(newParent);
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("Parent7")).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("Parent7")).orElse(null);
Assertions.assertEquals("John2", parent.name());
});
}
@@ -154,17 +154,17 @@ public abstract class ObjectsTestImpl {
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.OPTIMISTIC).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("Parent3"), LockingStrategy.OPTIMISTIC).orElse(null);
Assertions.assertEquals("John", parent.name());
curTx.put(parent.withName("John2"));
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.WRITE).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("Parent3"), LockingStrategy.WRITE).orElse(null);
Assertions.assertEquals("John2", parent.name());
curTx.put(parent.withName("John3"));
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("Parent3")).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("Parent3")).orElse(null);
Assertions.assertEquals("John3", parent.name());
});
}
@@ -187,7 +187,7 @@ public abstract class ObjectsTestImpl {
} catch (Throwable e) {
throw new RuntimeException(e);
}
var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null);
var got = curTx.get(Parent.class, new JObjectKeyImpl("Parent2")).orElse(null);
var newParent = new Parent(JObjectKey.of("Parent2"), "John");
curTx.put(newParent);
Log.warn("Thread 1 commit");
@@ -207,7 +207,7 @@ public abstract class ObjectsTestImpl {
} catch (Throwable e) {
throw new RuntimeException(e);
}
var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null);
var got = curTx.get(Parent.class, new JObjectKeyImpl("Parent2")).orElse(null);
var newParent = new Parent(JObjectKey.of("Parent2"), "John2");
curTx.put(newParent);
Log.warn("Thread 2 commit");
@@ -226,7 +226,7 @@ public abstract class ObjectsTestImpl {
}
var got = txm.run(() -> {
return curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null);
return curTx.get(Parent.class, new JObjectKeyImpl("Parent2")).orElse(null);
});
if (!thread1Failed.get()) {
@@ -263,7 +263,7 @@ public abstract class ObjectsTestImpl {
} catch (Throwable e) {
throw new RuntimeException(e);
}
var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl(key), strategy).orElse(null);
curTx.put(parent.withName("John"));
Log.warn("Thread 1 commit");
}, 0);
@@ -279,7 +279,7 @@ public abstract class ObjectsTestImpl {
Log.warn("Thread 2");
barrier.await(); // Ensure thread 2 tx id is larger than thread 1
txm.runTries(() -> {
var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl(key), strategy).orElse(null);
curTx.put(parent.withName("John2"));
Log.warn("Thread 2 commit");
}, 0);
@@ -298,7 +298,7 @@ public abstract class ObjectsTestImpl {
}
var got = txm.run(() -> {
return curTx.get(Parent.class, new JObjectKey(key)).orElse(null);
return curTx.get(Parent.class, new JObjectKeyImpl(key)).orElse(null);
});
if (!thread1Failed.get() && !thread2Failed.get()) {
@@ -344,7 +344,7 @@ public abstract class ObjectsTestImpl {
} catch (Throwable e) {
throw new RuntimeException(e);
}
var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl(key), strategy).orElse(null);
curTx.put(parent.withName("John"));
Log.warn("Thread 1 commit");
}, 0);
@@ -365,7 +365,7 @@ public abstract class ObjectsTestImpl {
} catch (Throwable e) {
throw new RuntimeException(e);
}
var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl(key), strategy).orElse(null);
curTx.put(parent.withName("John2"));
Log.warn("Thread 2 commit");
}, 0);
@@ -384,7 +384,7 @@ public abstract class ObjectsTestImpl {
}
var got = txm.run(() -> {
return curTx.get(Parent.class, new JObjectKey(key)).orElse(null);
return curTx.get(Parent.class, new JObjectKeyImpl(key)).orElse(null);
});
Assertions.assertFalse(!thread1Failed.get() && !thread2Failed.get());
@@ -435,7 +435,7 @@ public abstract class ObjectsTestImpl {
throw new RuntimeException(e);
}
Log.info("Thread 1 reading");
Assertions.assertTrue(curTx.get(Parent.class, new JObjectKey(key)).isEmpty());
Assertions.assertTrue(curTx.get(Parent.class, new JObjectKeyImpl(key)).isEmpty());
Log.info("Thread 1 done reading");
});
Log.info("Thread 1 finished");
@@ -452,9 +452,9 @@ public abstract class ObjectsTestImpl {
throw new RuntimeException(e);
}
txm.run(() -> {
Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKeyImpl(key)).orElseThrow().name());
});
deleteAndCheck(new JObjectKey(key));
deleteAndCheck(new JObjectKeyImpl(key));
}
@RepeatedTest(100)
@@ -494,7 +494,7 @@ public abstract class ObjectsTestImpl {
throw new RuntimeException(e);
}
Log.info("Thread 1 reading");
Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKeyImpl(key)).orElseThrow().name());
Log.info("Thread 1 done reading");
});
Log.info("Thread 1 finished");
@@ -511,9 +511,9 @@ public abstract class ObjectsTestImpl {
throw new RuntimeException(e);
}
txm.run(() -> {
Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKeyImpl(key)).orElseThrow().name());
});
deleteAndCheck(new JObjectKey(key));
deleteAndCheck(new JObjectKeyImpl(key));
}
@RepeatedTest(100)
@@ -559,7 +559,7 @@ public abstract class ObjectsTestImpl {
throw new RuntimeException(e);
}
Log.info("Thread 1 reading");
Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKeyImpl(key)).orElseThrow().name());
Log.info("Thread 1 done reading");
});
Log.info("Thread 1 finished");
@@ -576,9 +576,9 @@ public abstract class ObjectsTestImpl {
throw new RuntimeException(e);
}
txm.run(() -> {
Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name());
Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKeyImpl(key)).orElseThrow().name());
});
deleteAndCheck(new JObjectKey(key));
deleteAndCheck(new JObjectKeyImpl(key));
}
@RepeatedTest(100)
@@ -596,7 +596,7 @@ public abstract class ObjectsTestImpl {
curTx.put(new Parent(JObjectKey.of(key4), "John4"));
});
txm.run(() -> {
var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key));
var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key));
var got = iter.next();
Assertions.assertEquals(key1, got.getKey().name());
got = iter.next();
@@ -624,7 +624,7 @@ public abstract class ObjectsTestImpl {
curTx.put(new Parent(JObjectKey.of(key4), "John4"));
});
txm.run(() -> {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
var got = iter.next();
Assertions.assertEquals(key1, got.getKey().name());
got = iter.next();
@@ -636,7 +636,7 @@ public abstract class ObjectsTestImpl {
}
});
txm.run(() -> {
try (var iter = curTx.getIterator(IteratorStart.LT, new JObjectKey(key + "_5"))) {
try (var iter = curTx.getIterator(IteratorStart.LT, new JObjectKeyImpl(key + "_5"))) {
var got = iter.next();
Assertions.assertEquals(key4, got.getKey().name());
Assertions.assertTrue(iter.hasPrev());
@@ -648,14 +648,14 @@ public abstract class ObjectsTestImpl {
}
});
txm.run(() -> {
curTx.delete(new JObjectKey(key));
curTx.delete(new JObjectKey(key1));
curTx.delete(new JObjectKey(key2));
curTx.delete(new JObjectKey(key3));
curTx.delete(new JObjectKey(key4));
curTx.delete(new JObjectKeyImpl(key));
curTx.delete(new JObjectKeyImpl(key1));
curTx.delete(new JObjectKeyImpl(key2));
curTx.delete(new JObjectKeyImpl(key3));
curTx.delete(new JObjectKeyImpl(key4));
});
txm.run(() -> {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key));
}
});
@@ -696,7 +696,7 @@ public abstract class ObjectsTestImpl {
try {
barrier.await();
barrier2.await();
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
var got = iter.next();
Assertions.assertEquals(key1, got.getKey().name());
got = iter.next();
@@ -711,7 +711,7 @@ public abstract class ObjectsTestImpl {
});
Log.info("All threads finished");
txm.run(() -> {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
var got = iter.next();
Assertions.assertEquals(key1, got.getKey().name());
got = iter.next();
@@ -723,14 +723,14 @@ public abstract class ObjectsTestImpl {
}
});
txm.run(() -> {
curTx.delete(new JObjectKey(key));
curTx.delete(new JObjectKey(key1));
curTx.delete(new JObjectKey(key2));
curTx.delete(new JObjectKey(key3));
curTx.delete(new JObjectKey(key4));
curTx.delete(new JObjectKeyImpl(key));
curTx.delete(new JObjectKeyImpl(key1));
curTx.delete(new JObjectKeyImpl(key2));
curTx.delete(new JObjectKeyImpl(key3));
curTx.delete(new JObjectKeyImpl(key4));
});
txm.run(() -> {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key));
}
});
@@ -772,7 +772,7 @@ public abstract class ObjectsTestImpl {
try {
barrier.await();
barrier2.await();
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
var got = iter.next();
Assertions.assertEquals(key1, got.getKey().name());
got = iter.next();
@@ -790,7 +790,7 @@ public abstract class ObjectsTestImpl {
});
Log.info("All threads finished");
txm.run(() -> {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
var got = iter.next();
Assertions.assertEquals(key1, got.getKey().name());
got = iter.next();
@@ -803,14 +803,14 @@ public abstract class ObjectsTestImpl {
}
});
txm.run(() -> {
curTx.delete(new JObjectKey(key));
curTx.delete(new JObjectKey(key1));
curTx.delete(new JObjectKey(key2));
curTx.delete(new JObjectKey(key3));
curTx.delete(new JObjectKey(key4));
curTx.delete(new JObjectKeyImpl(key));
curTx.delete(new JObjectKeyImpl(key1));
curTx.delete(new JObjectKeyImpl(key2));
curTx.delete(new JObjectKeyImpl(key3));
curTx.delete(new JObjectKeyImpl(key4));
});
txm.run(() -> {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key));
}
});
@@ -841,7 +841,7 @@ public abstract class ObjectsTestImpl {
throw new RuntimeException(e);
}
curTx.put(new Parent(JObjectKey.of(key3), "John3"));
curTx.delete(new JObjectKey(key2));
curTx.delete(new JObjectKeyImpl(key2));
Log.info("Thread 1 committing");
});
Log.info("Thread 1 commited");
@@ -852,7 +852,7 @@ public abstract class ObjectsTestImpl {
try {
barrier.await();
barrier2.await();
try (var iter = curTx.getIterator(IteratorStart.LE, new JObjectKey(key3))) {
try (var iter = curTx.getIterator(IteratorStart.LE, new JObjectKeyImpl(key3))) {
var got = iter.next();
Assertions.assertEquals(key2, got.getKey().name());
Assertions.assertEquals("John2", ((Parent) got.getValue()).name());
@@ -878,7 +878,7 @@ public abstract class ObjectsTestImpl {
got = iter.next();
Assertions.assertEquals(key4, got.getKey().name());
}
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
var got = iter.next();
Assertions.assertEquals(key1, got.getKey().name());
got = iter.next();
@@ -896,7 +896,7 @@ public abstract class ObjectsTestImpl {
});
Log.info("All threads finished");
txm.run(() -> {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
var got = iter.next();
Assertions.assertEquals(key1, got.getKey().name());
got = iter.next();
@@ -906,13 +906,13 @@ public abstract class ObjectsTestImpl {
}
});
txm.run(() -> {
curTx.delete(new JObjectKey(key));
curTx.delete(new JObjectKey(key1));
curTx.delete(new JObjectKey(key3));
curTx.delete(new JObjectKey(key4));
curTx.delete(new JObjectKeyImpl(key));
curTx.delete(new JObjectKeyImpl(key1));
curTx.delete(new JObjectKeyImpl(key3));
curTx.delete(new JObjectKeyImpl(key4));
});
txm.run(() -> {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) {
try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKeyImpl(key))) {
Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key));
}
});

View File

@@ -33,15 +33,15 @@ public class PreCommitTxHookTest {
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate2")).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("ParentCreate2")).orElse(null);
Assertions.assertEquals("John", parent.name());
});
ArgumentCaptor<JData> dataCaptor = ArgumentCaptor.forClass(JData.class);
ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class);
ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKeyImpl.class);
Mockito.verify(spyHook, Mockito.times(1)).onCreate(keyCaptor.capture(), dataCaptor.capture());
Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).name());
Assertions.assertEquals(new JObjectKey("ParentCreate2"), keyCaptor.getValue());
Assertions.assertEquals(new JObjectKeyImpl("ParentCreate2"), keyCaptor.getValue());
}
@Test
@@ -52,19 +52,19 @@ public class PreCommitTxHookTest {
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("ParentDel")).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("ParentDel")).orElse(null);
Assertions.assertEquals("John", parent.name());
});
txm.run(() -> {
curTx.delete(new JObjectKey("ParentDel"));
curTx.delete(new JObjectKeyImpl("ParentDel"));
});
ArgumentCaptor<JData> dataCaptor = ArgumentCaptor.forClass(JData.class);
ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class);
ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKeyImpl.class);
Mockito.verify(spyHook, Mockito.times(1)).onDelete(keyCaptor.capture(), dataCaptor.capture());
Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).name());
Assertions.assertEquals(new JObjectKey("ParentDel"), keyCaptor.getValue());
Assertions.assertEquals(new JObjectKeyImpl("ParentDel"), keyCaptor.getValue());
}
@Test
@@ -81,11 +81,11 @@ public class PreCommitTxHookTest {
ArgumentCaptor<JData> dataCaptorOld = ArgumentCaptor.forClass(JData.class);
ArgumentCaptor<JData> dataCaptorNew = ArgumentCaptor.forClass(JData.class);
ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class);
ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKeyImpl.class);
Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture());
Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).name());
Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).name());
Assertions.assertEquals(new JObjectKey("ParentEdit"), keyCaptor.getValue());
Assertions.assertEquals(new JObjectKeyImpl("ParentEdit"), keyCaptor.getValue());
}
@Test
@@ -96,18 +96,18 @@ public class PreCommitTxHookTest {
});
txm.run(() -> {
var parent = curTx.get(Parent.class, new JObjectKey("ParentEdit2")).orElse(null);
var parent = curTx.get(Parent.class, new JObjectKeyImpl("ParentEdit2")).orElse(null);
Assertions.assertEquals("John", parent.name());
curTx.put(parent.withName("John changed"));
});
ArgumentCaptor<JData> dataCaptorOld = ArgumentCaptor.forClass(JData.class);
ArgumentCaptor<JData> dataCaptorNew = ArgumentCaptor.forClass(JData.class);
ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKey.class);
ArgumentCaptor<JObjectKey> keyCaptor = ArgumentCaptor.forClass(JObjectKeyImpl.class);
Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture());
Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).name());
Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).name());
Assertions.assertEquals(new JObjectKey("ParentEdit2"), keyCaptor.getValue());
Assertions.assertEquals(new JObjectKeyImpl("ParentEdit2"), keyCaptor.getValue());
}
@ApplicationScoped

View File

@@ -1,121 +1,129 @@
package com.usatiuk.objects.stores;
import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.Just;
import com.usatiuk.objects.TempDataProfile;
import com.usatiuk.objects.iterators.IteratorStart;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.RepeatedTest;
import java.util.List;
class Profiles {
public static class LmdbKvIteratorTestProfile extends TempDataProfile {
}
}
@QuarkusTest
@TestProfile(Profiles.LmdbKvIteratorTestProfile.class)
public class LmdbKvIteratorTest {
@Inject
LmdbObjectPersistentStore store;
@RepeatedTest(100)
public void iteratorTest1() {
store.commitTx(
new TxManifestRaw(
List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})),
Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})),
Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))),
List.of()
), -1, Runnable::run
);
var iterator = store.getIterator(IteratorStart.GE, JObjectKey.of(""));
Just.checkIterator(iterator, List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})),
Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})),
Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(3)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(2)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(2)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(3)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(2)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(1)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(1)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(3)));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(4)));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(0)));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertFalse(iterator.hasNext());
iterator.close();
iterator = store.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2)));
Assertions.assertTrue(iterator.hasNext());
Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey());
Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey());
Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey());
Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey());
Just.checkIterator(iterator.reversed(), Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})));
Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})), iterator.prev());
Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.prev());
Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.next());
iterator.close();
store.commitTx(new TxManifestRaw(
List.of(),
List.of(JObjectKey.of(Long.toString(1)), JObjectKey.of(Long.toString(2)), JObjectKey.of(Long.toString(3)))
),
-1, Runnable::run
);
}
}
//package com.usatiuk.objects.stores;
//
//
//import com.google.protobuf.ByteString;
//import com.usatiuk.objects.JObjectKey;
//import com.usatiuk.objects.Just;
//import com.usatiuk.objects.TempDataProfile;
//import com.usatiuk.objects.iterators.IteratorStart;
//import io.quarkus.test.junit.QuarkusTest;
//import io.quarkus.test.junit.TestProfile;
//import jakarta.inject.Inject;
//import org.apache.commons.lang3.tuple.Pair;
//import org.junit.jupiter.api.Assertions;
//import org.junit.jupiter.api.RepeatedTest;
//
//import java.util.List;
//
//class Profiles {
// public static class LmdbKvIteratorTestProfile extends TempDataProfile {
// }
//}
//
//@QuarkusTest
//@TestProfile(Profiles.LmdbKvIteratorTestProfile.class)
//public class LmdbKvIteratorTest {
//
// @Inject
// LmdbObjectPersistentStore store;
//
// long getNextTxId() {
// try (var s = store.getSnapshot()) {
// return s.id() + 1;
// }
// }
//
// @RepeatedTest(100)
// public void iteratorTest1() {
// store.prepareTx(
// new TxManifestRaw(
// List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})),
// Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})),
// Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))),
// List.of()
// ), getNextTxId()
// ).run();
//
// try (var snapshot = store.getSnapshot()) {
// var iterator = snapshot.getIterator(IteratorStart.GE, JObjectKey.of(""));
// Just.checkIterator(iterator, List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})),
// Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})),
// Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(3)));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(2)));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2)));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(2)));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(3)));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(2)));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(1)));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(1)));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(3)));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(4)));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(0)));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertFalse(iterator.hasNext());
// iterator.close();
//
// iterator = snapshot.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2)));
// Assertions.assertTrue(iterator.hasNext());
// Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey());
// Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey());
// Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey());
// Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey());
// Just.checkIterator(iterator.reversed(), Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})));
// Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
// Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})), iterator.prev());
// Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.prev());
// Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.next());
// iterator.close();
// }
//
// store.prepareTx(new TxManifestRaw(
// List.of(),
// List.of(JObjectKey.of(Long.toString(1)), JObjectKey.of(Long.toString(2)), JObjectKey.of(Long.toString(3)))
// ),
// getNextTxId()
// ).run();
// }
//}
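
The commented-out rewrite keys each test transaction off the store's snapshot id instead of a hard-coded -1. In sketch form (store and manifest are assumed to be in scope):

// Derive the next transaction id from the latest durable snapshot, then
// run the prepared commit immediately, as the commented test does above.
long nextId;
try (var s = store.getSnapshot()) {
    nextId = s.id() + 1;
}
store.prepareTx(manifest, nextId).run();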

View File

@@ -2,6 +2,7 @@ package com.usatiuk.dhfs;
import com.usatiuk.dhfs.jmap.JMapRef;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyImpl;
public record JDataNormalRef(JObjectKey obj) implements JDataRef {
@Override

View File

@@ -1,6 +1,7 @@
package com.usatiuk.dhfs;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyImpl;
import java.io.Serializable;

View File

@@ -142,6 +142,7 @@ public class RemoteObjectDeleter {
for (var r : ret) {
if (!r.getValue().getDeletionCandidate()) {
Log.infov("Could not delete {0}: reply from {1}: {2}", target, r.getKey(), r.getValue());
for (var rr : r.getRight().getReferrersList())
curTx.onCommit(() -> autosyncProcessor.add(JObjectKey.of(rr.getName())));
} else {

View File

@@ -48,11 +48,11 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
DhfsFileService fileService;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(new JObjectKey("fs"));
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(new JObjectKey("fs"), LockingStrategy.OPTIMISTIC);
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
}
private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,

View File

@@ -7,9 +7,6 @@ import com.usatiuk.dhfs.RemoteObjectMeta;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.files.objects.ChunkData;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
@@ -18,10 +15,13 @@ import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -47,21 +47,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
@Inject
TransactionManager jObjectTxManager;
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
int targetChunkAlignment;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@ConfigProperty(name = "dhfs.files.write_merge_threshold")
float writeMergeThreshold;
@ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take")
float writeMergeMaxChunkToTake;
@ConfigProperty(name = "dhfs.files.write_merge_limit")
float writeMergeLimit;
@ConfigProperty(name = "dhfs.files.write_last_chunk_limit")
float writeLastChunkLimit;
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
boolean useHashForChunks;
@@ -81,11 +72,11 @@ public class DhfsFileServiceImpl implements DhfsFileService {
JMapHelper jMapHelper;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(new JObjectKey("fs"));
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(new JObjectKey("fs"), LockingStrategy.OPTIMISTIC);
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
}
private ChunkData createChunk(ByteString bytes) {
@@ -355,22 +346,8 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return readChunk(uuid).size();
}
private void cleanupChunks(File f, Collection<JObjectKey> uuids) {
// FIXME:
// var inFile = useHashForChunks ? new HashSet<>(f.getChunks().values()) : Collections.emptySet();
// for (var cuuid : uuids) {
// try {
// if (inFile.contains(cuuid)) continue;
// jObjectManager.get(cuuid)
// .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION,
// (m, d, b, v) -> {
// m.removeRef(f.getName());
// return null;
// }));
// } catch (Exception e) {
// Log.error("Error when cleaning chunk " + cuuid, e);
// }
// }
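    // n is the log2 of the alignment (as implied by how targetChunkAlignment
    // is used below), so -(1L << n) is a mask that clears the low n bits:
    // e.g. alignDown(5000, 12) == 4096, alignDown(4096, 12) == 4096,
    // alignDown(4095, 12) == 0.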
private long alignDown(long num, long n) {
return num & -(1L << n);
}
@Override
@@ -379,7 +356,6 @@ public class DhfsFileServiceImpl implements DhfsFileService {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
// FIXME:
var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
if (file == null) {
Log.error("File not found when trying to write: " + fileUuid);
@@ -396,144 +372,58 @@ public class DhfsFileServiceImpl implements DhfsFileService {
file = remoteTx.getData(File.class, fileUuid).orElse(null);
}
Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
Log.tracev("Getting last");
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(offset + data.size()))) {
last = it.hasNext() ? it.next() : null;
Log.tracev("Last: {0}", last);
}
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
long start = 0;
long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
long writeEnd = offset + data.size();
long start = realOffset;
ByteString pendingPrefix = ByteString.empty();
ByteString pendingSuffix = ByteString.empty();
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
first = it.hasNext() ? it.next() : null;
Log.tracev("First: {0}", first);
boolean empty = last == null;
if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
first = null;
last = null;
start = offset;
} else if (!empty) {
assert first != null;
removedChunks.put(first.getKey().key(), first.getValue().ref());
while (it.hasNext() && it.peekNextKey().compareTo(last.getKey()) <= 0) {
var next = it.next();
Log.tracev("Next: {0}", next);
removedChunks.put(next.getKey().key(), next.getValue().ref());
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
while (it.hasNext()) {
var curEntry = it.next();
long curChunkStart = curEntry.getKey().key();
var curChunkId = curEntry.getValue().ref();
long curChunkEnd = curChunkStart + getChunkSize(curChunkId);
if (curChunkEnd <= realOffset) break;
removedChunks.put(curEntry.getKey().key(), curChunkId);
if (curChunkStart < offset) {
if (curChunkStart < start)
start = curChunkStart;
var readChunk = readChunk(curChunkId);
pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
}
removedChunks.put(last.getKey().key(), last.getValue().ref());
start = first.getKey().key();
if (curChunkEnd > writeEnd) {
var readChunk = readChunk(curChunkId);
pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
}
if (curChunkEnd >= writeEnd) break;
}
}
// NavigableMap<Long, JObjectKey> beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap();
// NavigableMap<Long, JObjectKey> afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap();
// if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) {
// beforeFirst = chunksAll;
// afterLast = Collections.emptyNavigableMap();
// first = null;
// last = null;
// start = offset;
// } else if (!chunksAll.isEmpty()) {
// var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true);
// removedChunks.putAll(between);
// start = first.getKey();
// }
ByteString pendingWrites = ByteString.empty();
if (first != null && first.getKey().key() < offset) {
var chunkBytes = readChunk(first.getValue().ref());
pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey().key())));
}
pendingWrites = pendingWrites.concat(data);
if (last != null) {
var lchunkBytes = readChunk(last.getValue().ref());
if (last.getKey().key() + lchunkBytes.size() > offset + data.size()) {
var startInFile = offset + data.size();
var startInChunk = startInFile - last.getKey().key();
pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size()));
}
}
ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);
int combinedSize = pendingWrites.size();
if (targetChunkSize > 0) {
// if (combinedSize < (targetChunkSize * writeMergeThreshold)) {
// boolean leftDone = false;
// boolean rightDone = false;
// while (!leftDone && !rightDone) {
// if (beforeFirst.isEmpty()) leftDone = true;
// if (!beforeFirst.isEmpty() || !leftDone) {
// var takeLeft = beforeFirst.lastEntry();
//
// var cuuid = takeLeft.getValue();
//
// if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
// leftDone = true;
// continue;
// }
//
// if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
// leftDone = true;
// continue;
// }
//
// // FIXME: (and test this)
// beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false);
// start = takeLeft.getKey();
// pendingWrites = readChunk(cuuid).concat(pendingWrites);
// combinedSize += getChunkSize(cuuid);
// removedChunks.put(takeLeft.getKey(), takeLeft.getValue());
// }
// if (afterLast.isEmpty()) rightDone = true;
// if (!afterLast.isEmpty() && !rightDone) {
// var takeRight = afterLast.firstEntry();
//
// var cuuid = takeRight.getValue();
//
// if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
// rightDone = true;
// continue;
// }
//
// if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
// rightDone = true;
// continue;
// }
//
// // FIXME: (and test this)
// afterLast = afterLast.tailMap(takeRight.getKey(), false);
// pendingWrites = pendingWrites.concat(readChunk(cuuid));
// combinedSize += getChunkSize(cuuid);
// removedChunks.put(takeRight.getKey(), takeRight.getValue());
// }
// }
// }
}
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
{
int targetChunkSize = 1 << targetChunkAlignment;
int cur = 0;
while (cur < combinedSize) {
int end;
if (targetChunkSize <= 0)
if (targetChunkAlignment < 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) {
end = Math.min(cur + targetChunkSize, combinedSize);
} else {
end = combinedSize;
}
}
else
end = Math.min(cur + targetChunkSize, combinedSize);
var thisChunk = pendingWrites.substring(cur, end);
@@ -556,7 +446,6 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}
remoteTx.putData(file);
cleanupChunks(file, removedChunks.values());
return (long) data.size();
});
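The rewritten write path replaces the old first/last bookkeeping with a single scan: every chunk overlapping the (aligned) write range is marked for removal, the bytes of the first overlapped chunk before offset become pendingPrefix, and the bytes of the last overlapped chunk past offset + data.size() become pendingSuffix; the combined buffer is then re-split into aligned chunks. A minimal sketch of the stitching step using plain arrays instead of ByteString (illustrative only):

static byte[] stitch(byte[] prefix, byte[] data, byte[] suffix) {
    byte[] out = new byte[prefix.length + data.length + suffix.length];
    System.arraycopy(prefix, 0, out, 0, prefix.length);
    System.arraycopy(data, 0, out, prefix.length, data.length);
    System.arraycopy(suffix, 0, out, prefix.length + data.length, suffix.length);
    return out; // the equivalent of pendingPrefix.concat(data).concat(pendingSuffix)
}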
@@ -575,17 +464,8 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}
if (length == 0) {
try (var it = jMapHelper.getIterator(file, IteratorStart.GE, JMapLongKey.of(0))) {
while (it.hasNext()) {
var next = it.next();
jMapHelper.delete(file, next.getKey());
}
}
// var oldChunks = file.chunks();
//
// file = file.withChunks(TreePMap.empty()).withMTime(System.currentTimeMillis());
jMapHelper.deleteAll(file);
remoteTx.putData(file);
// cleanupChunks(file, oldChunks.values());
return true;
}
@@ -688,7 +568,6 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}
remoteTx.putData(file);
cleanupChunks(file, removedChunks.values());
return true;
});
}
@@ -734,13 +613,22 @@ public class DhfsFileServiceImpl implements DhfsFileService {
@Override
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
return jObjectTxManager.executeTx(() -> {
var file = remoteTx.getData(File.class, fileUuid).orElseThrow(
() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription(
"File not found for setTimes: " + fileUuid))
);
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
remoteTx.putData(file.withCTime(atimeMs).withMTime(mtimeMs));
return true;
// FIXME:
if (dent instanceof JKleppmannTreeNode) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
return true;
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
});
}

View File

@@ -6,6 +6,7 @@ import com.usatiuk.dhfs.jkleppmanntree.structs.*;
import com.usatiuk.dhfs.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.repository.invalidation.Op;
import com.usatiuk.dhfs.repository.peersync.PeerInfoService;
import com.usatiuk.objects.JObjectKeyImpl;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
@@ -258,22 +259,22 @@ public class JKleppmannTreeManager {
@Override
public JObjectKey getRootId() {
return new JObjectKey(_treeName.name() + "_jt_root");
return JObjectKey.of(_treeName.name() + "_jt_root");
}
@Override
public JObjectKey getTrashId() {
return new JObjectKey(_treeName.name() + "_jt_trash");
return JObjectKey.of(_treeName.name() + "_jt_trash");
}
@Override
public JObjectKey getLostFoundId() {
return new JObjectKey(_treeName.name() + "_jt_lf");
return JObjectKey.of(_treeName.name() + "_jt_lf");
}
@Override
public JObjectKey getNewNodeId() {
return new JObjectKey(UUID.randomUUID().toString());
return JObjectKey.of(UUID.randomUUID().toString());
}
@Override

View File

@@ -7,6 +7,7 @@ import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.repository.peersync.structs.JKleppmannTreeNodeMetaPeer;
import com.usatiuk.kleppmanntree.OpMove;
import com.usatiuk.kleppmanntree.TreeNode;
import com.usatiuk.objects.JObjectKeyImpl;
import org.pcollections.HashTreePMap;
import org.pcollections.PCollection;
import org.pcollections.PMap;

View File

@@ -7,6 +7,7 @@ import com.usatiuk.dhfs.PeerId;
import com.usatiuk.kleppmanntree.CombinedTimestamp;
import com.usatiuk.kleppmanntree.LogRecord;
import com.usatiuk.kleppmanntree.OpMove;
import com.usatiuk.objects.JObjectKeyImpl;
import org.pcollections.PCollection;
import org.pcollections.PMap;
import org.pcollections.PSortedMap;
@@ -49,6 +50,6 @@ public record JKleppmannTreePersistentData(
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of(new JObjectKey(key().name() + "_jt_trash"), new JObjectKey(key().name() + "_jt_root"), new JObjectKey(key().name() + "_jt_lf"));
return List.of(JObjectKey.of(key().name() + "_jt_trash"), JObjectKey.of(key().name() + "_jt_root"), JObjectKey.of(key().name() + "_jt_lf"));
}
}

View File

@@ -17,23 +17,27 @@ public class JMapHelper {
Transaction curTx;
static <K extends JMapKey> JObjectKey makePrefix(JObjectKey holder) {
return JObjectKey.of(holder.name() + "/");
return JObjectKey.of(holder.name() + "=");
}
static <K extends JMapKey> JObjectKey makeKeyFirst(JObjectKey holder) {
return JObjectKey.of(holder.name() + "<");
}
static <K extends JMapKey> JObjectKey makeKey(JObjectKey holder, K key) {
return JObjectKey.of(makePrefix(holder).name() + key.toString());
}
static <K extends JMapKey> JObjectKey makeKeyLast(JObjectKey holder) {
return JObjectKey.of(holder.name() + ">");
}
public <K extends JMapKey> CloseableKvIterator<K, JMapEntry<K>> getIterator(JMapHolder<K> holder, IteratorStart start, K key) {
return new JMapIterator<>(curTx.getIterator(start, makeKey(holder.key(), key)), holder);
}
public <K extends JMapKey> CloseableKvIterator<K, JMapEntry<K>> getIterator(JMapHolder<K> holder, K key) {
return getIterator(holder, IteratorStart.GE, key);
}
public <K extends JMapKey> CloseableKvIterator<K, JMapEntry<K>> getIterator(JMapHolder<K> holder) {
return new JMapIterator<>(curTx.getIterator(IteratorStart.GE, makePrefix(holder.key())), holder);
return new JMapIterator<>(curTx.getIterator(IteratorStart.GT, makeKeyFirst(holder.key())), holder);
}
public <K extends JMapKey> void put(JMapHolder<K> holder, K key, JObjectKey ref) {

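The holder prefix changes from "/" to "=", with "<" and ">" as iteration sentinels. This works because of ASCII ordering: '<' (60) < '=' (61) < '>' (62), so every real entry key "holder=..." sorts strictly between makeKeyFirst and makeKeyLast, and the full-holder iterator can start GT "holder<" without ever skipping or matching a foreign key. A quick check:

public class KeyOrderDemo {
    public static void main(String[] args) {
        String first = "file1<";                        // makeKeyFirst
        String entry = "file1=00000000000000000042";    // makeKey
        String last  = "file1>";                        // makeKeyLast
        System.out.println(first.compareTo(entry) < 0); // true
        System.out.println(entry.compareTo(last) < 0);  // true
    }
}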
View File

@@ -1,5 +1,7 @@
package com.usatiuk.dhfs.jmap;
import org.apache.commons.lang3.StringUtils;
import javax.annotation.Nonnull;
import java.io.Serializable;
@@ -14,7 +16,7 @@ public record JMapLongKey(long key) implements JMapKey, Comparable<JMapKey>, Ser
@Override
public String toString() {
return String.format("%016d", key);
return StringUtils.leftPad(String.valueOf(key), 20, '0');
}
@Override

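Both formats zero-pad so that lexicographic order of the string keys matches numeric order for non-negative keys; the new version pads to 20 characters (Long.MAX_VALUE has 19 digits) and skips java.util.Formatter's format-string parsing on every call. A quick check:

import org.apache.commons.lang3.StringUtils;

public class PadDemo {
    public static void main(String[] args) {
        String a = StringUtils.leftPad(String.valueOf(42L), 20, '0');
        String b = StringUtils.leftPad(String.valueOf(100L), 20, '0');
        System.out.println(a);                  // 00000000000000000042
        System.out.println(a.compareTo(b) < 0); // true: string order == numeric order
    }
}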
View File

@@ -105,7 +105,14 @@ public class AutosyncProcessor {
try {
JObjectKey finalName = name;
boolean ok = txm.run(() -> {
var obj = remoteTx.getMeta(finalName).orElse(null);
RemoteObjectMeta obj;
try {
obj = remoteTx.getMeta(finalName).orElse(null);
} catch (ClassCastException cex) {
Log.debugv("Not downloading object {0}, not remote object", finalName);
return true;
}
if (obj == null) {
Log.debugv("Not downloading object {0}, not found", finalName);
return true;

View File

@@ -3,11 +3,9 @@ package com.usatiuk.dhfs.repository;
import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.repository.peerdiscovery.PeerAddress;
import com.usatiuk.dhfs.repository.peerdiscovery.PeerDiscoveryDirectory;
import com.usatiuk.dhfs.repository.peersync.PeerInfo;
import com.usatiuk.dhfs.repository.peersync.PeerInfoService;
import com.usatiuk.dhfs.repository.peersync.api.PeerSyncApiClientDynamic;
import com.usatiuk.dhfs.repository.peertrust.PeerTrustManager;
import com.usatiuk.dhfs.repository.webapi.AvailablePeerInfo;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import io.quarkus.logging.Log;
@@ -70,7 +68,7 @@ public class PeerManager {
if (_heartbeatExecutor == null) return;
try {
var peers = peerInfoService.getPeersNoSelf();
var pids = peers.stream().map(PeerInfo::id).toList();
var pids = peers.stream().map(com.usatiuk.dhfs.repository.peersync.PeerInfo::id).toList();
List<PeerId> stale = _states.keySet().stream().filter(p -> !pids.contains(p)).toList();
stale.forEach(_states.keySet()::remove);
@@ -98,7 +96,7 @@ public class PeerManager {
}
}
private void handleConnectionSuccess(PeerInfo host, PeerAddress address) {
private void handleConnectionSuccess(com.usatiuk.dhfs.repository.peersync.PeerInfo host, PeerAddress address) {
boolean wasReachable = isReachable(host);
boolean shouldSync = !persistentPeerDataService.isInitialSyncDone(host.id());
@@ -120,7 +118,7 @@ public class PeerManager {
}
}
public void handleConnectionError(PeerInfo host) {
public void handleConnectionError(com.usatiuk.dhfs.repository.peersync.PeerInfo host) {
boolean wasReachable = isReachable(host);
if (wasReachable)
@@ -134,7 +132,7 @@ public class PeerManager {
}
// FIXME:
private boolean pingCheck(PeerInfo host, PeerAddress address) {
private boolean pingCheck(com.usatiuk.dhfs.repository.peersync.PeerInfo host, PeerAddress address) {
try {
return rpcClientFactory.withObjSyncClient(host.id(), address, pingTimeout, (peer, c) -> {
c.ping(PingRequest.getDefaultInstance());
@@ -150,7 +148,7 @@ public class PeerManager {
return _states.containsKey(host);
}
public boolean isReachable(PeerInfo host) {
public boolean isReachable(com.usatiuk.dhfs.repository.peersync.PeerInfo host) {
return isReachable(host.id());
}
@@ -170,7 +168,7 @@ public class PeerManager {
public HostStateSnapshot getHostStateSnapshot() {
return transactionManager.run(() -> {
var partition = peerInfoService.getPeersNoSelf().stream().map(PeerInfo::id)
var partition = peerInfoService.getPeersNoSelf().stream().map(com.usatiuk.dhfs.repository.peersync.PeerInfo::id)
.collect(Collectors.partitioningBy(this::isReachable));
return new HostStateSnapshot(partition.get(true), partition.get(false));
});
@@ -201,10 +199,9 @@ public class PeerManager {
peerTrustManager.reloadTrustManagerHosts(transactionManager.run(() -> peerInfoService.getPeers().stream().toList())); //FIXME:
}
public Collection<AvailablePeerInfo> getSeenButNotAddedHosts() {
public Collection<PeerId> getSeenButNotAddedHosts() {
return transactionManager.run(() -> {
return peerDiscoveryDirectory.getReachablePeers().stream().filter(p -> !peerInfoService.getPeerInfo(p).isPresent())
.map(p -> new AvailablePeerInfo(p.toString())).toList();
return peerDiscoveryDirectory.getReachablePeers().stream().filter(p -> !peerInfoService.getPeerInfo(p).isPresent()).toList();
});
}

View File

@@ -1,7 +1,9 @@
package com.usatiuk.dhfs.repository;
import com.usatiuk.dhfs.ShutdownChecker;
import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.ShutdownChecker;
import com.usatiuk.dhfs.repository.peerdiscovery.IpPeerAddress;
import com.usatiuk.dhfs.repository.peerdiscovery.PeerAddressType;
import com.usatiuk.dhfs.repository.peersync.PeerInfoService;
import com.usatiuk.dhfs.repository.peertrust.PeerTrustManager;
import com.usatiuk.objects.transaction.Transaction;
@@ -13,6 +15,7 @@ import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.pcollections.HashTreePMap;
import org.pcollections.HashTreePSet;
import java.io.File;
@@ -23,6 +26,7 @@ import java.nio.file.StandardOpenOption;
import java.security.KeyPair;
import java.security.cert.CertificateEncodingException;
import java.security.cert.X509Certificate;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
@@ -70,7 +74,7 @@ public class PersistentPeerDataService {
_selfKeyPair = CertificateTools.generateKeyPair();
_selfCertificate = CertificateTools.generateCertificate(_selfKeyPair, _selfUuid.toString());
curTx.put(new PersistentRemoteHostsData(_selfUuid, _selfCertificate, _selfKeyPair, HashTreePSet.empty()));
curTx.put(new PersistentRemoteHostsData(_selfUuid, _selfCertificate, _selfKeyPair, HashTreePSet.empty(), HashTreePMap.empty()));
peerInfoService.putPeer(_selfUuid, _selfCertificate.getEncoded());
} catch (CertificateEncodingException e) {
throw new RuntimeException(e);
@@ -153,4 +157,39 @@ public class PersistentPeerDataService {
return data.initialSyncDone().contains(peerId);
});
}
public List<IpPeerAddress> getPersistentPeerAddresses() {
return txm.run(() -> {
var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null);
if (data == null) throw new IllegalStateException("Self data not found");
return data.persistentPeerAddress().values().stream().toList();
});
}
public void addPersistentPeerAddress(PeerId peerId, IpPeerAddress address) {
txm.run(() -> {
var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null);
if (data == null) throw new IllegalStateException("Self data not found");
var newData = data.persistentPeerAddress().plus(peerId, address.withType(PeerAddressType.WAN)); //TODO:
curTx.put(data.withPersistentPeerAddress(newData));
});
}
public void removePersistentPeerAddress(PeerId peerId) {
txm.run(() -> {
var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null);
if (data == null) throw new IllegalStateException("Self data not found");
var newData = data.persistentPeerAddress().minus(peerId);
curTx.put(data.withPersistentPeerAddress(newData));
});
}
public IpPeerAddress getPersistentPeerAddress(PeerId peerId) {
return txm.run(() -> {
var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null);
if (data == null) throw new IllegalStateException("Self data not found");
return data.persistentPeerAddress().get(peerId);
});
}
}
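A hypothetical end-to-end use of the new address-persistence API (peer id and address values are made up; assumes an injected PersistentPeerDataService):

var peerId = PeerId.of("some-peer-uuid");
var addr = new IpPeerAddress(peerId, PeerAddressType.LAN,
        InetAddress.getByName("192.168.1.10"), 8080, 8443); // may throw UnknownHostException
persistentPeerDataService.addPersistentPeerAddress(peerId, addr); // stored with type rewritten to WAN
var stored = persistentPeerDataService.getPersistentPeerAddress(peerId);
persistentPeerDataService.removePersistentPeerAddress(peerId);

Stored addresses are fed back into discovery by the new PersistentStaticPeerDiscovery bean further down in this diff.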

View File

@@ -1,8 +1,10 @@
package com.usatiuk.dhfs.repository;
import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.repository.peerdiscovery.IpPeerAddress;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.PeerId;
import org.pcollections.PMap;
import org.pcollections.PSet;
import java.io.Serializable;
@@ -12,7 +14,8 @@ import java.security.cert.X509Certificate;
public record PersistentRemoteHostsData(PeerId selfUuid,
X509Certificate selfCertificate,
KeyPair selfKeyPair,
PSet<PeerId> initialSyncDone) implements JData, Serializable {
PSet<PeerId> initialSyncDone,
PMap<PeerId, IpPeerAddress> persistentPeerAddress) implements JData, Serializable {
public static final JObjectKey KEY = JObjectKey.of("self_peer_data");
@Override
@@ -20,9 +23,12 @@ public record PersistentRemoteHostsData(PeerId selfUuid,
return KEY;
}
public PersistentRemoteHostsData withInitialSyncDone(PSet<PeerId> initialSyncDone) {
return new PersistentRemoteHostsData(selfUuid, selfCertificate, selfKeyPair, initialSyncDone);
return new PersistentRemoteHostsData(selfUuid, selfCertificate, selfKeyPair, initialSyncDone, persistentPeerAddress);
}
public PersistentRemoteHostsData withPersistentPeerAddress(PMap<PeerId, IpPeerAddress> persistentPeerAddress) {
return new PersistentRemoteHostsData(selfUuid, selfCertificate, selfKeyPair, initialSyncDone, persistentPeerAddress);
}
@Override

View File

@@ -46,6 +46,8 @@ public class RemoteObjectServiceClient {
ProtoSerializer<OpP, Op> opProtoSerializer;
@Inject
ProtoSerializer<GetObjectReply, ReceivedObject> receivedObjectProtoSerializer;
@Inject
PeerManager peerManager;
public Pair<PeerId, ReceivedObject> getSpecificObject(JObjectKey key, PeerId peerId) {
return rpcClientFactory.withObjSyncClient(peerId, (peer, client) -> {
@@ -63,7 +65,9 @@ public class RemoteObjectServiceClient {
}
var targetVersion = objMeta.versionSum();
var targets = objMeta.knownRemoteVersions().entrySet().stream()
var targets = objMeta.knownRemoteVersions().isEmpty()
? peerManager.getAvailableHosts()
: objMeta.knownRemoteVersions().entrySet().stream()
.filter(entry -> entry.getValue().equals(targetVersion))
.map(Map.Entry::getKey).toList();
@@ -92,10 +96,13 @@ public class RemoteObjectServiceClient {
curTx.get(RemoteObjectMeta.class, ref).map(m -> m.withSeen(true)).ifPresent(curTx::put);
}
});
var serialized = opProtoSerializer.serialize(op);
var built = OpPushRequest.newBuilder().addMsg(serialized).build();
rpcClientFactory.withObjSyncClient(target, (tgt, client) -> client.opPush(built));
}
var builder = OpPushRequest.newBuilder();
for (Op op : ops) {
builder.addMsg(opProtoSerializer.serialize(op));
}
var built = builder.build();
rpcClientFactory.withObjSyncClient(target, (tgt, client) -> client.opPush(built));
return OpPushReply.getDefaultInstance();
}

View File

@@ -72,9 +72,7 @@ public class RemoteObjectServiceServerImpl {
}
public Uni<CanDeleteReply> canDelete(PeerId from, CanDeleteRequest request) {
var peerId = from;
Log.info("<-- canDelete: " + request.getName() + " from " + peerId);
Log.infov("<-- canDelete: {0} from {1}", request, from);
var builder = CanDeleteReply.newBuilder();
@@ -94,6 +92,10 @@ public class RemoteObjectServiceServerImpl {
curTx.onCommit(() -> autosyncProcessor.add(r.obj()));
}
}
if (!builder.getDeletionCandidate()) {
Log.infov("Not deletion candidate: {0}, {1} (asked from {2})", obj, builder, from);
}
});
return Uni.createFrom().item(builder.build());
}
@@ -102,13 +104,13 @@ public class RemoteObjectServiceServerImpl {
try {
var ops = request.getMsgList().stream().map(opProtoSerializer::deserialize).toList();
for (var op : ops) {
Log.info("<-- op: " + op + " from " + from);
Log.infov("<-- opPush: {0} from {1}", op, from);
txm.run(() -> {
opHandler.handleOp(from, op);
});
}
} catch (Exception e) {
Log.error(e, e);
Log.error("Error handling ops", e);
throw e;
}
return Uni.createFrom().item(OpPushReply.getDefaultInstance());

View File

@@ -1,5 +1,8 @@
package com.usatiuk.dhfs.repository.invalidation;
import com.usatiuk.dhfs.repository.RemoteObjectServiceClient;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
import com.usatiuk.dhfs.utils.DataLocker;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.repository.PeerManager;
@@ -14,9 +17,13 @@ import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import jakarta.ws.rs.core.Link;
import org.apache.commons.collections4.multimap.ArrayListValuedHashMap;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
@@ -39,6 +46,9 @@ public class InvalidationQueueService {
@Inject
PersistentPeerDataService persistentPeerDataService;
private final DataLocker _locker = new DataLocker();
@Inject
RemoteObjectServiceClient remoteObjectServiceClient;
private ExecutorService _executor;
private volatile boolean _shutdown = false;
@@ -103,34 +113,61 @@ public class InvalidationQueueService {
String stats = "Sent invalidation: ";
long success = 0;
for (var e : data) {
// TODO: Race?
if (!peerInfoService.existsPeer(e.peer())) {
Log.warnv("Will ignore invalidation of {0} to {1}, peer not found", e.key(), e.peer());
continue;
List<AutoCloseableNoThrow> locks = new LinkedList<>();
try {
ArrayListValuedHashMap<PeerId, Op> ops = new ArrayListValuedHashMap<>();
ArrayListValuedHashMap<PeerId, Runnable> commits = new ArrayListValuedHashMap<>();
for (var e : data) {
// TODO: Race?
if (!peerInfoService.existsPeer(e.peer())) {
Log.warnv("Will ignore invalidation of {0} to {1}, peer not found", e.key(), e.peer());
continue;
}
if (!remoteHostManager.isReachable(e.peer())) {
deferredInvalidationQueueService.defer(e);
continue;
}
if (!persistentPeerDataService.isInitialSyncDone(e.peer())) {
pushInvalidationToOne(e);
continue;
}
var lock = _locker.tryLock(e);
if (lock == null) {
pushInvalidationToOne(e);
continue;
}
locks.add(lock);
try {
var prepared = opPusher.preparePush(e);
ops.get(e.peer()).addAll(prepared.getLeft());
commits.get(e.peer()).addAll(prepared.getRight());
success++;
} catch (Exception ex) {
Log.warnv("Failed to prepare invalidation to {0}, will retry: {1}", e, ex);
pushInvalidationToOne(e);
}
if (_shutdown) {
Log.info("Invalidation sender exiting");
break;
}
}
if (!remoteHostManager.isReachable(e.peer())) {
deferredInvalidationQueueService.defer(e);
continue;
for (var p : ops.keySet()) {
var list = ops.get(p);
Log.infov("Pushing invalidations to {0}: {1}", p, list);
remoteObjectServiceClient.pushOps(p, list);
commits.get(p).forEach(Runnable::run);
}
if (!persistentPeerDataService.isInitialSyncDone(e.peer())) {
pushInvalidationToOne(e);
continue;
}
try {
opPusher.doPush(e);
success++;
} catch (Exception ex) {
Log.warnv("Failed to send invalidation to {0}, will retry: {1}", e, ex);
pushInvalidationToOne(e);
}
if (_shutdown) {
Log.info("Invalidation sender exiting");
break;
} catch (Exception e) {
Log.warnv("Failed to send invalidations, will retry", e);
for (var inv : data) {
pushInvalidationToOne(inv);
}
} finally {
locks.forEach(AutoCloseableNoThrow::close);
}
stats += success + "/" + data.size() + " ";
@@ -168,11 +205,23 @@ public class InvalidationQueueService {
deferredInvalidationQueueService.defer(entry);
}
void pushInvalidationToOneNoDelay(InvalidationQueueEntry entry) {
if (remoteHostManager.isReachable(entry.peer()))
_queue.addNoDelay(entry);
else
deferredInvalidationQueueService.defer(entry);
}
public void pushInvalidationToOne(PeerId host, JObjectKey obj) {
var entry = new InvalidationQueueEntry(host, obj);
pushInvalidationToOne(entry);
}
public void pushInvalidationToOneNoDelay(PeerId host, JObjectKey obj) {
var entry = new InvalidationQueueEntry(host, obj);
pushInvalidationToOneNoDelay(entry);
}
void pushDeferredInvalidations(InvalidationQueueEntry entry) {
_queue.add(entry);
}
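The restructured sender turns the loop into a two-phase batch: under per-entry try-locks it collects prepared ops and their commit callbacks per peer, then pushes each peer's ops in a single RPC and runs the callbacks only after the push succeeds; any failure re-queues the entries. A simplified shape of the flow, with standard collections in place of ArrayListValuedHashMap (illustrative only):

Map<PeerId, List<Op>> ops = new HashMap<>();
Map<PeerId, List<Runnable>> commits = new HashMap<>();
for (var e : data) {
    var prepared = opPusher.preparePush(e); // Pair<List<Op>, List<Runnable>>
    ops.computeIfAbsent(e.peer(), k -> new ArrayList<>()).addAll(prepared.getLeft());
    commits.computeIfAbsent(e.peer(), k -> new ArrayList<>()).addAll(prepared.getRight());
}
for (var p : ops.keySet()) {
    remoteObjectServiceClient.pushOps(p, ops.get(p)); // one batched RPC per peer
    commits.get(p).forEach(Runnable::run);            // commit only after a successful push
}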

View File

@@ -14,6 +14,7 @@ import com.usatiuk.objects.transaction.TransactionManager;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -35,65 +36,60 @@ public class OpPusher {
@Inject
DtoMapperService dtoMapperService;
public void doPush(InvalidationQueueEntry entry) {
AtomicBoolean doAgain = new AtomicBoolean(false);
do {
// FIXME:
doAgain.set(false);
List<Op> info = txm.run(() -> {
var obj = curTx.get(JData.class, entry.key()).orElse(null);
switch (obj) {
case RemoteObjectMeta remote -> {
JDataRemoteDto data =
remote.knownType().isAnnotationPresent(JDataRemotePush.class)
? remoteTransaction.getData(remote.knownType(), entry.key())
.map(d -> dtoMapperService.toDto(d, d.dtoClass())).orElse(null)
: null;
public Pair<List<Op>, List<Runnable>> preparePush(InvalidationQueueEntry entry) {
List<Op> info = txm.run(() -> {
var obj = curTx.get(JData.class, entry.key()).orElse(null);
switch (obj) {
case RemoteObjectMeta remote -> {
JDataRemoteDto data =
remote.knownType().isAnnotationPresent(JDataRemotePush.class)
? remoteTransaction.getData(remote.knownType(), entry.key())
.map(d -> dtoMapperService.toDto(d, d.dtoClass())).orElse(null)
: null;
if (remote.knownType().isAnnotationPresent(JDataRemotePush.class) && data == null) {
Log.warnv("Failed to get data for push {0} of type {1}", entry.key(), remote.knownType());
}
return List.of(new IndexUpdateOp(entry.key(), remote.changelog(), data));
}
case JKleppmannTreePersistentData pd -> {
var tree = jKleppmannTreeManager.getTree(pd.key());
if (!tree.hasPendingOpsForHost(entry.peer()))
return List.of(tree.getPeriodicPushOp());
var ops = tree.getPendingOpsForHost(entry.peer(), 100);
if (tree.hasPendingOpsForHost(entry.peer())) {
doAgain.set(true);
invalidationQueueService.pushInvalidationToOne(entry.peer(), pd.key());
}
return ops;
}
case null,
default -> {
return null;
if (remote.knownType().isAnnotationPresent(JDataRemotePush.class) && data == null) {
Log.warnv("Failed to get data for push {0} of type {1}", entry.key(), remote.knownType());
}
return List.of(new IndexUpdateOp(entry.key(), remote.changelog(), data));
}
case JKleppmannTreePersistentData pd -> {
var tree = jKleppmannTreeManager.getTree(pd.key());
if (!tree.hasPendingOpsForHost(entry.peer()))
return List.of(tree.getPeriodicPushOp());
var ops = tree.getPendingOpsForHost(entry.peer(), 100);
if (tree.hasPendingOpsForHost(entry.peer())) {
invalidationQueueService.pushInvalidationToOneNoDelay(entry.peer(), pd.key());
}
return ops;
}
case null,
default -> {
return List.of();
}
});
if (info == null) {
return;
}
Log.debugv("Pushing invalidation: entry {0}, sending {1}", entry, info);
remoteObjectServiceClient.pushOps(entry.peer(), info);
txm.run(() -> {
var obj = curTx.get(JData.class, entry.key()).orElse(null);
switch (obj) {
case JKleppmannTreePersistentData pd: {
var tree = jKleppmannTreeManager.getTree(pd.key());
for (var op : info) {
tree.commitOpForHost(entry.peer(), op);
});
List<Runnable> commits = info.stream().<Runnable>map(o -> {
return () -> {
txm.run(() -> {
var obj = curTx.get(JData.class, entry.key()).orElse(null);
switch (obj) {
case JKleppmannTreePersistentData pd: {
var tree = jKleppmannTreeManager.getTree(pd.key());
for (var op : info) {
tree.commitOpForHost(entry.peer(), op);
}
break;
}
break;
case null:
default:
}
case null:
default:
}
});
} while (doAgain.get());
});
};
}).toList();
return Pair.of(info, commits);
}
}

View File

@@ -6,4 +6,7 @@ import java.net.InetAddress;
public record IpPeerAddress(PeerId peer, PeerAddressType type,
InetAddress address, int port, int securePort) implements PeerAddress {
public IpPeerAddress withType(PeerAddressType type) {
return new IpPeerAddress(peer, type, address, port, securePort);
}
}

View File

@@ -0,0 +1,36 @@
package com.usatiuk.dhfs.repository.peerdiscovery;
import com.usatiuk.dhfs.PeerId;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Optional;
public class PeerAddrStringHelper {
public static Optional<IpPeerAddress> parse(String addr) {
if (addr.isEmpty()) {
return Optional.empty();
}
var split = addr.split(":");
try {
return Optional.of(new IpPeerAddress(PeerId.of(split[0]), PeerAddressType.LAN, InetAddress.getByName(split[1]),
Integer.parseInt(split[2]), Integer.parseInt(split[3])));
} catch (UnknownHostException ex) {
throw new RuntimeException(ex);
}
}
public static Optional<IpPeerAddress> parseNoPeer(PeerId peerId, String addr) {
if (addr.isEmpty()) {
return Optional.empty();
}
var split = addr.split(":");
try {
return Optional.of(new IpPeerAddress(peerId, PeerAddressType.LAN, InetAddress.getByName(split[0]),
Integer.parseInt(split[1]), Integer.parseInt(split[2])));
} catch (UnknownHostException ex) {
throw new RuntimeException(ex);
}
}
}
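For reference, the two variants expect colon-separated fields (example values are made up):

// parse:       "<peerId>:<host>:<port>:<securePort>"
// parseNoPeer: "<host>:<port>:<securePort>", with the peer id supplied separately
Optional<IpPeerAddress> full = PeerAddrStringHelper.parse("some-peer:10.0.0.5:8080:8443");
Optional<IpPeerAddress> bare = PeerAddrStringHelper.parseNoPeer(PeerId.of("some-peer"), "10.0.0.5:8080:8443");
// Both produce a PeerAddressType.LAN address; an empty string yields Optional.empty().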

View File

@@ -2,7 +2,9 @@ package com.usatiuk.dhfs.repository.peerdiscovery;
import com.usatiuk.dhfs.PeerId;
public interface PeerAddress {
import java.io.Serializable;
public interface PeerAddress extends Serializable {
PeerId peer();
PeerAddressType type();

View File

@@ -0,0 +1,22 @@
package com.usatiuk.dhfs.repository.peerdiscovery;
import com.usatiuk.dhfs.repository.PersistentPeerDataService;
import io.quarkus.scheduler.Scheduled;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
@ApplicationScoped
public class PersistentStaticPeerDiscovery {
@Inject
PeerDiscoveryDirectory peerDiscoveryDirectory;
@Inject
PersistentPeerDataService persistentPeerDataService;
@Scheduled(every = "1s", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
public void discoverPeers() {
var addrs = persistentPeerDataService.getPersistentPeerAddresses();
for (var addr : addrs) {
peerDiscoveryDirectory.notifyAddr(addr);
}
}
}

View File

@@ -1,17 +1,13 @@
package com.usatiuk.dhfs.repository.peerdiscovery;
import com.usatiuk.dhfs.PeerId;
import io.quarkus.scheduler.Scheduled;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;
@ApplicationScoped
public class StaticPeerDiscovery {
@@ -22,18 +18,8 @@ public class StaticPeerDiscovery {
public StaticPeerDiscovery(@ConfigProperty(name = "dhfs.peerdiscovery.static-peers") Optional<String> staticPeers) {
var peers = staticPeers.orElse("");
_peers = Arrays.stream(peers.split(",")).flatMap(e ->
{
if (e.isEmpty()) {
return Stream.of();
}
var split = e.split(":");
try {
return Stream.of(new IpPeerAddress(PeerId.of(split[0]), PeerAddressType.LAN, InetAddress.getByName(split[1]),
Integer.parseInt(split[2]), Integer.parseInt(split[3])));
} catch (UnknownHostException ex) {
throw new RuntimeException(ex);
}
}).toList();
PeerAddrStringHelper.parse(e).stream()
).toList();
}
@Scheduled(every = "1s", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)

View File

@@ -1,4 +0,0 @@
package com.usatiuk.dhfs.repository.webapi;
public record AvailablePeerInfo(String uuid) {
}

View File

@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.repository.webapi;
public record PeerAddressInfo(String uuid, String address) {
}

View File

@@ -0,0 +1,4 @@
package com.usatiuk.dhfs.repository.webapi;
public record PeerInfo(String uuid, String address) {
}

View File

@@ -12,8 +12,8 @@ import jakarta.ws.rs.Path;
import java.util.Collection;
import java.util.List;
@Path("/objects-manage")
public class ManagementApi {
@Path("/peers-manage")
public class PeerManagementApi {
@Inject
PeerInfoService peerInfoService;
@Inject
@@ -39,7 +39,20 @@ public class ManagementApi {
@Path("available-peers")
@GET
public Collection<AvailablePeerInfo> availablePeers() {
return peerManager.getSeenButNotAddedHosts();
public Collection<KnownPeerInfo> availablePeers() {
return peerManager.getSeenButNotAddedHosts().stream().map(p -> new KnownPeerInfo(p.toString())).toList();
}
@Path("peer-state")
@GET
public Collection<PeerInfo> peerInfos(Collection<String> peerIdStrings) {
return peerIdStrings.stream().map(PeerId::of).map(
peerId -> {
return new PeerInfo(
peerId.toString(),
peerManager.getAddress(peerId).toString()
);
}
).toList();
}
}

View File

@@ -0,0 +1,52 @@
package com.usatiuk.dhfs.repository.webapi;
import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.repository.PeerManager;
import com.usatiuk.dhfs.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.repository.peerdiscovery.PeerAddrStringHelper;
import com.usatiuk.dhfs.repository.peersync.PeerInfoService;
import jakarta.inject.Inject;
import jakarta.ws.rs.*;
import java.util.Collection;
@Path("/peers-addr-manage")
public class PersistentPeerAddressApi {
@Inject
PeerInfoService peerInfoService;
@Inject
PeerManager peerManager;
@Inject
PersistentPeerDataService persistentPeerDataService;
@Path("{peerId}")
@PUT
public void addPeerAddress(String peerAddr, @PathParam("peerId") String peerId) {
if (peerAddr.isEmpty()) {
deletePeerAddress(peerId);
return;
}
persistentPeerDataService.addPersistentPeerAddress(PeerId.of(peerId), PeerAddrStringHelper.parseNoPeer(PeerId.of(peerId), peerAddr).orElseThrow(IllegalArgumentException::new));
}
@Path("{peerId}")
@DELETE
public void deletePeerAddress(@PathParam("peerId") String peerId) {
persistentPeerDataService.removePersistentPeerAddress(PeerId.of(peerId));
}
@Path("{peerId}")
@GET
public String getPeerAddress(@PathParam("peerId") String peerId) {
return persistentPeerDataService.getPersistentPeerAddress(PeerId.of(peerId)).toString();
}
@Path("")
@GET
public Collection<PeerAddressInfo> getPeerAddresses() {
return persistentPeerDataService.getPersistentPeerAddresses()
.stream()
.map(p -> new PeerAddressInfo(p.peer().toString(), p.address().getHostAddress() + ":" + p.port() + ":" + p.securePort()))
.toList();
}
}

View File

@@ -4,7 +4,7 @@ option java_multiple_files = true;
option java_package = "com.usatiuk.dhfs.repository.peerdiscovery";
option java_outer_classname = "DhfsObjectPeerDiscoveryApi";
package dhfs.objects.peerdiscovery;
package dhfs.peerdiscovery;
message PeerDiscoveryInfo {
string uuid = 1;

View File

@@ -4,7 +4,7 @@ option java_multiple_files = true;
option java_package = "com.usatiuk.dhfs.persistence";
option java_outer_classname = "DhfsObjectPersistence";
package dhfs.objects.persistence;
package dhfs.persistence;
message JObjectKeyP {
string name = 1;

View File

@@ -1,12 +1,12 @@
syntax = "proto3";
import "dhfs_objects_serial.proto";
import "dhfs_serial.proto";
option java_multiple_files = true;
option java_package = "com.usatiuk.dhfs.repository";
option java_outer_classname = "DhfsObjectSyncApi";
package dhfs.objects.sync;
package dhfs.sync;
service DhfsObjectSyncGrpc {
rpc OpPush (OpPushRequest) returns (OpPushReply) {}
@@ -22,22 +22,22 @@ message PingRequest {}
message PingReply {}
message GetObjectRequest {
dhfs.objects.persistence.JObjectKeyP name = 2;
dhfs.persistence.JObjectKeyP name = 2;
}
message GetObjectReply {
dhfs.objects.persistence.ObjectChangelog changelog = 5;
dhfs.objects.persistence.JDataRemoteDtoP pushedData = 6;
dhfs.persistence.ObjectChangelog changelog = 5;
dhfs.persistence.JDataRemoteDtoP pushedData = 6;
}
message CanDeleteRequest {
dhfs.objects.persistence.JObjectKeyP name = 2;
repeated dhfs.objects.persistence.JObjectKeyP ourReferrers = 3;
dhfs.persistence.JObjectKeyP name = 2;
repeated dhfs.persistence.JObjectKeyP ourReferrers = 3;
}
message CanDeleteReply {
bool deletionCandidate = 2;
repeated dhfs.objects.persistence.JObjectKeyP referrers = 3;
repeated dhfs.persistence.JObjectKeyP referrers = 3;
}
message OpPushRequest {

View File

@@ -4,7 +4,7 @@ dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=1
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
@@ -15,21 +15,15 @@ dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
# Writes strictly smaller than this will try to merge with blocks nearby
dhfs.files.write_merge_threshold=0.8
# If a merge would result in a block of greater size than this, stop merging
dhfs.files.write_merge_limit=1.2
# Don't take blocks of this size and above when merging
dhfs.files.write_merge_max_chunk_to_take=1
dhfs.files.write_last_chunk_limit=1.5
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=2
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=4
dhfs.objects.ref-processor.threads=4
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true

View File

@@ -1,12 +1,12 @@
package com.usatiuk.dhfs.files;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.RepeatedTest;
@@ -27,6 +27,7 @@ class Profiles {
protected void getConfigOverrides(Map<String, String> ret) {
ret.put("dhfs.fuse.enabled", "false");
ret.put("dhfs.files.target_chunk_size", "-1");
ret.put("dhfs.files.target_chunk_alignment", "-1");
}
}
@@ -35,6 +36,7 @@ class Profiles {
protected void getConfigOverrides(Map<String, String> ret) {
ret.put("dhfs.fuse.enabled", "false");
ret.put("dhfs.files.target_chunk_size", "3");
ret.put("dhfs.files.target_chunk_alignment", "2");
}
}
}
@@ -150,6 +152,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
}

View File

@@ -66,13 +66,13 @@ public class DhfsFuseIT {
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -246,7 +246,7 @@ public class DhfsFuseIT {
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /root/dhfs_default/fuse/newfile1").getExitCode());
@@ -262,7 +262,7 @@ public class DhfsFuseIT {
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

View File

@@ -92,25 +92,25 @@ public class DhfsFusex3IT {
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
var c2curl1 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
var c2curl3 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c3uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
var c3curl = container3.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
@@ -191,7 +191,7 @@ public class DhfsFusex3IT {
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
Thread.sleep(10000);

View File

@@ -73,13 +73,13 @@ public class ResyncIT {
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -113,13 +113,13 @@ public class ResyncIT {
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -153,13 +153,13 @@ public class ResyncIT {
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

View File

@@ -19,16 +19,17 @@ public class DataLocker {
var tag = _locks.get(data);
if (tag != null) {
synchronized (tag) {
if (!tag.released) {
while (!tag.released) {
if (tag.owner == Thread.currentThread()) {
return DUMMY_LOCK;
}
tag.wait(4000L);
if (!tag.released) {
System.out.println("Timeout waiting for lock: " + data);
System.exit(1);
throw new InterruptedException();
}
tag.wait();
// tag.wait(4000L);
// if (!tag.released) {
// System.out.println("Timeout waiting for lock: " + data);
// System.exit(1);
// throw new InterruptedException();
// }
}
continue;
}
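The timed wait with its System.exit escape hatch becomes an indefinite wait inside a proper while loop (guarding against spurious wakeups), and the owner check makes the lock reentrant for the holding thread. A stripped-down sketch of the tag pattern (not the real class):

static final class Tag {
    Thread owner = Thread.currentThread(); // set by whoever took the lock
    boolean released = false;              // flipped under synchronized, then notifyAll
}

static void awaitTag(Tag tag) throws InterruptedException {
    synchronized (tag) {
        while (!tag.released) {
            if (tag.owner == Thread.currentThread())
                return;      // reentrant: the current thread already holds it
            tag.wait();      // no timeout; woken by notifyAll on release
        }
    }
}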

View File

@@ -44,6 +44,24 @@ public class HashSetDelayedBlockingQueue<T> {
}
}
// Adds the object to the queue, if it exists re-adds it
// With no delay
// Returns the old object, or null
public T addNoDelay(T el) {
synchronized (this) {
if (_closed) throw new IllegalStateException("Adding to a queue that is closed!");
SetElement<T> old = _set.putFirst(el, new SetElement<>(el, 0));
this.notify();
if (old != null)
return old.el();
else
return null;
}
}
// Adds the object to the queue, if it exists re-adds it with a new delay
// Returns the old object, or null
public T readd(T el) {

View File

@@ -24,6 +24,19 @@ public class HashSetDelayedBlockingQueueTest {
Assertions.assertTrue((gotTime - curTime) >= 1000);
}
@Test
void addNoDelay() throws InterruptedException {
var queue = new HashSetDelayedBlockingQueue<>(1000);
var curTime = System.currentTimeMillis();
queue.addNoDelay("hello!");
var thing = queue.get();
var gotTime = System.currentTimeMillis();
Assertions.assertEquals("hello!", thing);
Assertions.assertTrue((gotTime - curTime) < 500);
}
@Test
void GetImmediate() throws InterruptedException {
var queue = new HashSetDelayedBlockingQueue<>(0);

webui/package-lock.json (generated, 4868 lines changed): diff suppressed because it is too large.

View File

@@ -10,27 +10,31 @@
"browserslist": "> 0.5%, last 2 versions, not dead",
"dependencies": {
"jwt-decode": "^4.0.0",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-router-dom": "^6.24.0",
"zod": "^3.23.8"
"react": "^19.1.0",
"react-dom": "^19.1.0",
"react-router": "^7.4.1",
"react-router-dom": "^7.4.1",
"zod": "^3.24.2"
},
"@parcel/resolver-default": {
"packageExports": true
},
"devDependencies": {
"@parcel/transformer-sass": "^2.12.0",
"@parcel/transformer-typescript-tsc": "^2.12.0",
"@parcel/validator-typescript": "^2.12.0",
"@types/eslint": "^8.56.10",
"@parcel/transformer-sass": "^2.14.4",
"@parcel/transformer-typescript-tsc": "^2.14.4",
"@parcel/validator-typescript": "^2.14.4",
"@types/eslint": "^9.6.1",
"@types/eslint-config-prettier": "^6.11.3",
"@types/react": "^18.3.3",
"@types/react-dom": "^18.3.0",
"@typescript-eslint/eslint-plugin": "^7.14.1",
"@typescript-eslint/parser": "^7.14.1",
"eslint": "^8",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-react": "^7.34.3",
"parcel": "^2.12.0",
"prettier": "^3.3.2",
"@types/react": "^19.0.12",
"@types/react-dom": "^19.0.4",
"@typescript-eslint/eslint-plugin": "^8.28.0",
"@typescript-eslint/parser": "^8.28.0",
"eslint": "^9",
"eslint-config-prettier": "^10.1.1",
"eslint-plugin-react": "^7.37.4",
"parcel": "^2.14.4",
"prettier": "^3.5.3",
"process": "^0.11.10",
"typescript": "^5.5.2"
"typescript": "^5.8.2"
}
}

View File

@@ -9,7 +9,6 @@ export interface TPeerAvailableCardProps {
export function PeerAvailableCard({ peerInfo }: TPeerAvailableCardProps) {
const fetcher = useFetcher();
return (
<div className="peerAvailableCard">
<div className={"peerInfo"}>
@@ -22,8 +21,8 @@ export function PeerAvailableCard({ peerInfo }: TPeerAvailableCardProps) {
action={"/home/peers"}
>
<button type="submit">connect</button>
<input name="intent" hidden={true} value={"add_peer"} />
<input name="uuid" hidden={true} value={peerInfo.uuid} />
<input name="intent" hidden={true} defaultValue={"add_peer"} />
<input name="uuid" hidden={true} defaultValue={peerInfo.uuid} />
</fetcher.Form>
</div>
);

View File

@@ -1,7 +1,9 @@
import { TKnownPeerInfoTo } from "./api/dto";
import "./PeerKnownCard.scss";
import { useFetcher } from "react-router-dom";
import { useFetcher, useLoaderData } from "react-router-dom";
import { LoaderToType } from "./commonPlumbing";
import { peerStateLoader } from "./PeerStatePlumbing";
export interface TPeerKnownCardProps {
peerInfo: TKnownPeerInfoTo;
@@ -9,12 +11,42 @@ export interface TPeerKnownCardProps {
export function PeerKnownCard({ peerInfo }: TPeerKnownCardProps) {
const fetcher = useFetcher();
const loaderData = useLoaderData() as LoaderToType<typeof peerStateLoader>;
const addr = loaderData.peerAddresses.find(
(item) => item.uuid === peerInfo.uuid,
);
return (
<div className="peerKnownCard">
<div className={"peerInfo"}>
<span>UUID: </span>
<span>{peerInfo.uuid}</span>
<div>
<span>UUID: </span>
<span>{peerInfo.uuid}</span>
</div>
<div>
<fetcher.Form
className="actions"
method="put"
action={"/home/peers"}
>
<input
name="intent"
hidden={true}
defaultValue={"save_addr"}
/>
<input
name="uuid"
hidden={true}
defaultValue={peerInfo.uuid}
/>
<input
name="address"
defaultValue={addr?.address || ""}
/>
<button type="submit">save</button>
</fetcher.Form>
</div>
</div>
<fetcher.Form
className="actions"
@@ -22,8 +54,12 @@ export function PeerKnownCard({ peerInfo }: TPeerKnownCardProps) {
action={"/home/peers"}
>
<button type="submit">remove</button>
<input name="intent" hidden={true} value={"remove_peer"} />
<input name="uuid" hidden={true} value={peerInfo.uuid} />
<input
name="intent"
hidden={true}
defaultValue={"remove_peer"}
/>
<input name="uuid" hidden={true} defaultValue={peerInfo.uuid} />
</fetcher.Form>
</div>
);

View File

@@ -1,7 +1,9 @@
import {
getAvailablePeers,
getKnownPeers,
getPeerAddresses,
putKnownPeer,
putPeerAddress,
removeKnownPeer,
} from "./api/PeerState";
import { ActionFunctionArgs } from "react-router-dom";
@@ -10,10 +12,15 @@ export async function peerStateLoader() {
return {
availablePeers: await getAvailablePeers(),
knownPeers: await getKnownPeers(),
peerAddresses: await getPeerAddresses(),
};
}
export type PeerStateActionType = "add_peer" | "remove_peer" | unknown;
export type PeerStateActionType =
| "add_peer"
| "remove_peer"
| "save_addr"
| unknown;
export async function peerStateAction({ request }: ActionFunctionArgs) {
const formData = await request.formData();
@@ -22,6 +29,11 @@ export async function peerStateAction({ request }: ActionFunctionArgs) {
return await putKnownPeer(formData.get("uuid") as string);
} else if (intent === "remove_peer") {
return await removeKnownPeer(formData.get("uuid") as string);
} else if (intent === "save_addr") {
return await putPeerAddress(
formData.get("uuid") as string,
formData.get("address") as string,
);
} else {
throw new Error("Malformed action: " + JSON.stringify(request));
}

View File

@@ -3,36 +3,73 @@ import {
AvailablePeerInfoToResp,
KnownPeerInfoToResp,
NoContentToResp,
PeerAddressInfoToResp,
TAvailablePeerInfoArrTo,
TAvailablePeerInfoToResp,
TKnownPeerInfoArrTo,
TKnownPeerInfoToResp,
TNoContentToResp,
TPeerAddressInfoArrTo,
TPeerAddressInfoToResp,
} from "./dto";
export async function getAvailablePeers(): Promise<TAvailablePeerInfoArrTo> {
return fetchJSON_throws<
TAvailablePeerInfoToResp,
typeof AvailablePeerInfoToResp
>("/objects-manage/available-peers", "GET", AvailablePeerInfoToResp);
>("/peers-manage/available-peers", "GET", AvailablePeerInfoToResp);
}
export async function getKnownPeers(): Promise<TKnownPeerInfoArrTo> {
return fetchJSON_throws<TKnownPeerInfoToResp, typeof KnownPeerInfoToResp>(
"/objects-manage/known-peers",
"/peers-manage/known-peers",
"GET",
KnownPeerInfoToResp,
);
}
export async function putKnownPeer(uuid: string): Promise<TNoContentToResp> {
return fetchJSON("/objects-manage/known-peers", "PUT", NoContentToResp, {
return fetchJSON("/peers-manage/known-peers", "PUT", NoContentToResp, {
uuid,
});
}
export async function removeKnownPeer(uuid: string): Promise<TNoContentToResp> {
return fetchJSON("/objects-manage/known-peers", "DELETE", NoContentToResp, {
return fetchJSON("/peers-manage/known-peers", "DELETE", NoContentToResp, {
uuid,
});
}
export async function getPeerAddresses(): Promise<TPeerAddressInfoArrTo> {
return fetchJSON_throws<
TPeerAddressInfoToResp,
typeof PeerAddressInfoToResp
>("/peers-addr-manage", "GET", PeerAddressInfoToResp);
}
export async function putPeerAddress(
uuid: string,
address: string,
): Promise<TNoContentToResp> {
return fetchJSON(
`/peers-addr-manage/${uuid}`,
"PUT",
NoContentToResp,
address,
);
}
export async function removePeerAddress(
uuid: string,
): Promise<TNoContentToResp> {
return fetchJSON(`/peers-addr-manage/${uuid}`, "DELETE", NoContentToResp);
}
export async function getPeerAddress(
uuid: string,
): Promise<TPeerAddressInfoToResp> {
return fetchJSON_throws<
TPeerAddressInfoToResp,
typeof PeerAddressInfoToResp
>(`/peers-addr-manage/${uuid}`, "GET", PeerAddressInfoToResp);
}

View File

@@ -64,6 +64,19 @@ export type TKnownPeerInfoArrTo = z.infer<typeof KnownPeerInfoArrTo>;
export const KnownPeerInfoToResp = CreateAPIResponse(KnownPeerInfoArrTo);
export type TKnownPeerInfoToResp = z.infer<typeof KnownPeerInfoToResp>;
// PeerAddressInfo
export const PeerAddressInfoTo = z.object({
uuid: z.string(),
address: z.string(),
});
export type TPeerAddressInfoTo = z.infer<typeof PeerAddressInfoTo>;
export const PeerAddressInfoArrTo = z.array(PeerAddressInfoTo);
export type TPeerAddressInfoArrTo = z.infer<typeof PeerAddressInfoArrTo>;
export const PeerAddressInfoToResp = CreateAPIResponse(PeerAddressInfoArrTo);
export type TPeerAddressInfoToResp = z.infer<typeof PeerAddressInfoToResp>;
// KnownPeerPut
export const KnownPeerPutTo = z.object({ uuid: z.string() });
export type TKnownPeerPutTo = z.infer<typeof KnownPeerPutTo>;

View File

@@ -44,14 +44,17 @@ export async function fetchJSON<T, P extends { parse: (arg: string) => T }>(
body?: string | Record<string, unknown> | File,
headers?: Record<string, string>,
): Promise<T> {
const reqBody = () =>
body instanceof File
const reqBody = () => {
if (typeof body === "string" || body instanceof String)
return body.toString();
return body instanceof File
? (() => {
const fd = new FormData();
fd.append("file", body);
return fd;
})()
: JSON.stringify(body);
};
const reqHeaders = () =>
body instanceof File

View File

@@ -6,7 +6,7 @@
],
"jsx": "react-jsx",
"target": "es2015",
"moduleResolution": "Node",
"moduleResolution": "bundler",
"emitDecoratorMetadata": true,
"experimentalDecorators": true,
"sourceMap": true,